1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
26 #include "libiberty.h"
29 #include "elf-vxworks.h"
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_table.  */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_table.  */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_table.  */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_table.  */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
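/* For illustration only (a sketch, not part of the upstream logic; "htab"
   below stands for any struct elf32_arm_link_hash_table *): when
   htab->use_rel is set, as on most ARM ELF targets, RELOC_SECTION (htab,
   ".text") evaluates to ".rel.text" and RELOC_SIZE (htab) to
   sizeof (Elf32_External_Rel); a RELA-based target such as VxWorks clears
   use_rel and instead gets ".rela.text", sizeof (Elf32_External_Rela) and
   the swap_reloca_in/out helpers from the two macros above.  */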
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info,
66 asection *sec,
67 bfd_byte *contents);
69 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
70 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
71 in that slot.  */
73 static reloc_howto_type elf32_arm_howto_table_1[] =
76 HOWTO (R_ARM_NONE, /* type */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
80 FALSE, /* pc_relative */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
88 FALSE), /* pcrel_offset */
90 HOWTO (R_ARM_PC24, /* type */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
94 TRUE, /* pc_relative */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
109 FALSE, /* pc_relative */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
124 TRUE, /* pc_relative */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
134 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
139 TRUE, /* pc_relative */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
154 FALSE, /* pc_relative */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
169 FALSE, /* pc_relative */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
179 HOWTO (R_ARM_THM_ABS5, /* type */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
183 FALSE, /* pc_relative */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
194 HOWTO (R_ARM_ABS8, /* type */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
198 FALSE, /* pc_relative */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
208 HOWTO (R_ARM_SBREL32, /* type */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
212 FALSE, /* pc_relative */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
222 HOWTO (R_ARM_THM_CALL, /* type */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
226 TRUE, /* pc_relative */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
236 HOWTO (R_ARM_THM_PC8, /* type */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
240 TRUE, /* pc_relative */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
250 HOWTO (R_ARM_BREL_ADJ, /* type */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
254 FALSE, /* pc_relative */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
264 HOWTO (R_ARM_SWI24, /* type */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
268 FALSE, /* pc_relative */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
278 HOWTO (R_ARM_THM_SWI8, /* type */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
282 FALSE, /* pc_relative */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
286 "R_ARM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
297 TRUE, /* pc_relative */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
312 TRUE, /* pc_relative */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
322 /* Dynamic TLS relocations. */
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
328 FALSE, /* pc_relative */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
342 FALSE, /* pc_relative */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
356 FALSE, /* pc_relative */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
366 /* Relocs used in ARM Linux */
368 HOWTO (R_ARM_COPY, /* type */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
372 FALSE, /* pc_relative */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
382 HOWTO (R_ARM_GLOB_DAT, /* type */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
386 FALSE, /* pc_relative */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
400 FALSE, /* pc_relative */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
410 HOWTO (R_ARM_RELATIVE, /* type */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
414 FALSE, /* pc_relative */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
424 HOWTO (R_ARM_GOTOFF32, /* type */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
428 FALSE, /* pc_relative */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
438 HOWTO (R_ARM_GOTPC, /* type */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
442 TRUE, /* pc_relative */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
452 HOWTO (R_ARM_GOT32, /* type */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
456 FALSE, /* pc_relative */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
466 HOWTO (R_ARM_PLT32, /* type */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
470 TRUE, /* pc_relative */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
480 HOWTO (R_ARM_CALL, /* type */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
484 TRUE, /* pc_relative */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
494 HOWTO (R_ARM_JUMP24, /* type */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
498 TRUE, /* pc_relative */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
508 HOWTO (R_ARM_THM_JUMP24, /* type */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
512 TRUE, /* pc_relative */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
522 HOWTO (R_ARM_BASE_ABS, /* type */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
526 FALSE, /* pc_relative */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
540 TRUE, /* pc_relative */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
554 TRUE, /* pc_relative */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
568 TRUE, /* pc_relative */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
582 FALSE, /* pc_relative */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
596 FALSE, /* pc_relative */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
610 FALSE, /* pc_relative */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
620 HOWTO (R_ARM_TARGET1, /* type */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
624 FALSE, /* pc_relative */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
634 HOWTO (R_ARM_ROSEGREL32, /* type */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
638 FALSE, /* pc_relative */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
648 HOWTO (R_ARM_V4BX, /* type */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
652 FALSE, /* pc_relative */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
662 HOWTO (R_ARM_TARGET2, /* type */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
666 FALSE, /* pc_relative */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
676 HOWTO (R_ARM_PREL31, /* type */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
680 TRUE, /* pc_relative */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
694 FALSE, /* pc_relative */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
704 HOWTO (R_ARM_MOVT_ABS, /* type */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
708 FALSE, /* pc_relative */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
722 TRUE, /* pc_relative */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
732 HOWTO (R_ARM_MOVT_PREL, /* type */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
736 TRUE, /* pc_relative */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
750 FALSE, /* pc_relative */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
764 FALSE, /* pc_relative */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
778 TRUE, /* pc_relative */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
792 TRUE, /* pc_relative */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
802 HOWTO (R_ARM_THM_JUMP19, /* type */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
806 TRUE, /* pc_relative */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
816 HOWTO (R_ARM_THM_JUMP6, /* type */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
820 TRUE, /* pc_relative */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
830 /* These are declared as 13-bit signed relocations because we can
831 address -4095 .. 4095 (base) by altering ADDW to SUBW or vice
832 versa.  */
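/* Worked example (illustrative): ADDW encodes a 12-bit unsigned immediate,
   so on its own it only reaches base+0 .. base+4095.  Treating the
   relocation as 13-bit signed lets the linker flip ADDW to SUBW when the
   resolved value is negative, covering the full base-4095 .. base+4095
   range described above.  */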
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
837 TRUE, /* pc_relative */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
847 HOWTO (R_ARM_THM_PC12, /* type */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
851 TRUE, /* pc_relative */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
861 HOWTO (R_ARM_ABS32_NOI, /* type */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
865 FALSE, /* pc_relative */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
875 HOWTO (R_ARM_REL32_NOI, /* type */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
879 TRUE, /* pc_relative */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
889 /* Group relocations. */
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
895 TRUE, /* pc_relative */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
909 TRUE, /* pc_relative */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
923 TRUE, /* pc_relative */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
937 TRUE, /* pc_relative */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
951 TRUE, /* pc_relative */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
965 TRUE, /* pc_relative */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
979 TRUE, /* pc_relative */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
993 TRUE, /* pc_relative */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1007 TRUE, /* pc_relative */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1021 TRUE, /* pc_relative */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1035 TRUE, /* pc_relative */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1049 TRUE, /* pc_relative */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1063 TRUE, /* pc_relative */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1077 TRUE, /* pc_relative */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1091 TRUE, /* pc_relative */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1105 TRUE, /* pc_relative */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1119 TRUE, /* pc_relative */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1133 TRUE, /* pc_relative */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1147 TRUE, /* pc_relative */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1161 TRUE, /* pc_relative */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1175 TRUE, /* pc_relative */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1189 TRUE, /* pc_relative */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1203 TRUE, /* pc_relative */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1217 TRUE, /* pc_relative */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1231 TRUE, /* pc_relative */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1245 TRUE, /* pc_relative */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1259 TRUE, /* pc_relative */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1269 /* End of group relocations. */
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1275 FALSE, /* pc_relative */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1289 FALSE, /* pc_relative */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1303 FALSE, /* pc_relative */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1317 FALSE, /* pc_relative */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1331 FALSE, /* pc_relative */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1345 FALSE, /* pc_relative */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1355 EMPTY_HOWTO (90), /* Unallocated. */
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 FALSE, /* pc_relative */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 FALSE, /* pc_relative */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1392 TRUE, /* pc_relative */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1406 FALSE, /* pc_relative */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1420 FALSE, /* pc_relative */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1437 FALSE, /* pc_relative */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1445 FALSE), /* pcrel_offset */
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1452 FALSE, /* pc_relative */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1460 FALSE), /* pcrel_offset */
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1466 TRUE, /* pc_relative */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1480 TRUE, /* pc_relative */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1495 FALSE, /* pc_relative */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1509 FALSE, /* pc_relative */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1523 FALSE, /* pc_relative */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1537 FALSE, /* pc_relative */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1551 FALSE, /* pc_relative */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1565 FALSE, /* pc_relative */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1579 FALSE, /* pc_relative */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1593 FALSE, /* pc_relative */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1604 /* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1608 249-255 extended, currently unused, relocations: */
1610 static reloc_howto_type elf32_arm_howto_table_2[4] =
1612 HOWTO (R_ARM_RREL32, /* type */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1616 FALSE, /* pc_relative */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1624 FALSE), /* pcrel_offset */
1626 HOWTO (R_ARM_RABS32, /* type */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1630 FALSE, /* pc_relative */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1638 FALSE), /* pcrel_offset */
1640 HOWTO (R_ARM_RPC24, /* type */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1644 FALSE, /* pc_relative */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1652 FALSE), /* pcrel_offset */
1654 HOWTO (R_ARM_RBASE, /* type */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1658 FALSE, /* pc_relative */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
1666 FALSE) /* pcrel_offset */
1669 static reloc_howto_type *
1670 elf32_arm_howto_from_type (unsigned int r_type)
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1677 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
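/* Example usage (illustrative): elf32_arm_howto_from_type (R_ARM_ABS32)
   returns &elf32_arm_howto_table_1[R_ARM_ABS32], whose name field is
   "R_ARM_ABS32".  A value outside both tables (for instance one of the
   unallocated types such as 129) falls through both range checks and the
   function returns NULL for it.  */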
1683 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1684 Elf_Internal_Rela * elf_reloc)
1686 unsigned int r_type;
1688 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1689 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1692 struct elf32_arm_reloc_map
1694 bfd_reloc_code_real_type bfd_reloc_val;
1695 unsigned char elf_reloc_val;
1698 /* All entries in this list must also be present in elf32_arm_howto_table. */
1699 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1725 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1726 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1727 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1728 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1729 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1730 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1731 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1732 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1733 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1734 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1735 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1736 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1737 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1738 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1739 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1740 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1741 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1742 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1743 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1744 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1745 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1746 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1747 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1748 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1750 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1751 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1752 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1754 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1755 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1756 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1757 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1758 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1759 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1760 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1761 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1762 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1763 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1764 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1765 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1766 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1768 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1769 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1770 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1771 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1772 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1773 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1774 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1775 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1776 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1777 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1778 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1781 static reloc_howto_type *
1782 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1783 bfd_reloc_code_real_type code)
1787 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1788 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1789 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
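/* Example usage (illustrative): elf32_arm_reloc_type_lookup (abfd,
   BFD_RELOC_32) walks the map above, matches the {BFD_RELOC_32,
   R_ARM_ABS32} entry and hands R_ARM_ABS32 to elf32_arm_howto_from_type,
   so the caller gets the R_ARM_ABS32 howto; a BFD reloc code with no
   entry in the map yields NULL.  */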
1794 static reloc_howto_type *
1795 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1800 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1801 if (elf32_arm_howto_table_1[i].name != NULL
1802 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1803 return &elf32_arm_howto_table_1[i];
1805 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1806 if (elf32_arm_howto_table_2[i].name != NULL
1807 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1808 return &elf32_arm_howto_table_2[i];
1813 /* Support for core dump NOTE sections. */
1816 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1821 switch (note->descsz)
1826 case 148: /* Linux/ARM 32-bit. */
1828 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1831 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1840 /* Make a ".reg/999" section. */
1841 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1842 size, note->descpos + offset);
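/* Background (illustrative, following the Linux/ARM elf_prstatus layout):
   within the 148-byte note, pr_cursig sits at offset 12 and pr_pid at
   offset 24, which is what the two bfd_get calls above read; the
   general-purpose register block (r0-r15, cpsr and orig_r0, i.e. 18 words
   = 72 bytes) follows at offset 72 and is what ends up in the ".reg"
   pseudosection.  */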
1846 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1848 switch (note->descsz)
1853 case 124: /* Linux/ARM elf_prpsinfo. */
1854 elf_tdata (abfd)->core_program
1855 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1856 elf_tdata (abfd)->core_command
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1860 /* Note that for some reason, a spurious space is tacked
1861 onto the end of the args in some (at least one anyway)
1862 implementations, so strip it off if it exists. */
1864 char *command = elf_tdata (abfd)->core_command;
1865 int n = strlen (command);
1867 if (0 < n && command[n - 1] == ' ')
1868 command[n - 1] = '\0';
1874 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1875 #define TARGET_LITTLE_NAME "elf32-littlearm"
1876 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1877 #define TARGET_BIG_NAME "elf32-bigarm"
1879 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1880 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1882 typedef unsigned long int insn32;
1883 typedef unsigned short int insn16;
1885 /* In lieu of proper flags, assume all EABIv4 or later objects are
1886 interworkable.  */
1887 #define INTERWORK_FLAG(abfd) \
1888 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1889 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1890 || ((abfd)->flags & BFD_LINKER_CREATED))
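/* For illustration: an EABI version 5 object passes INTERWORK_FLAG even
   with EF_ARM_INTERWORK clear, because EF_ARM_EABI_VERSION of its e_flags
   is >= EF_ARM_EABI_VER4; only pre-EABIv4 objects need the explicit
   interwork flag (or must be linker-created) to satisfy the test.  */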
1892 /* The linker script knows the section names for placement.
1893 The entry_names are used to do simple name mangling on the stubs.
1894 Given a function name, and its type, the stub can be found. The
1895 name can be changed.  The only requirement is that the %s be present.  */
1896 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1897 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1899 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1900 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1902 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1903 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1905 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1906 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1908 #define STUB_ENTRY_NAME "__%s_veneer"
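/* Example of the name mangling (illustrative, using a hypothetical
   function "foo"): an ARM-to-Thumb glue entry is emitted as
   "__foo_from_arm" in the ".glue_7" section, its Thumb-to-ARM counterpart
   as "__foo_from_thumb" in ".glue_7t", and a long-branch veneer as
   "__foo_veneer"; each name is produced by substituting the function name
   for the %s above.  The VFP11 erratum veneers instead take a hexadecimal
   index (e.g. "__vfp11_veneer_0"), and the v4 BX veneers a register
   number (e.g. "__bx_r3").  */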
1910 /* The name of the dynamic interpreter.  This is put in the .interp
1911 section.  */
1912 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1914 #ifdef FOUR_WORD_PLT
1916 /* The first entry in a procedure linkage table looks like
1917 this. It is set up so that any shared library function that is
1918 called before the relocation has been set up calls the dynamic
1919 linker first.  */
1920 static const bfd_vma elf32_arm_plt0_entry [] =
1922 0xe52de004, /* str lr, [sp, #-4]! */
1923 0xe59fe010, /* ldr lr, [pc, #16] */
1924 0xe08fe00e, /* add lr, pc, lr */
1925 0xe5bef008, /* ldr pc, [lr, #8]! */
1928 /* Subsequent entries in a procedure linkage table look like
1929 this.  */
1930 static const bfd_vma elf32_arm_plt_entry [] =
1932 0xe28fc600, /* add ip, pc, #NN */
1933 0xe28cca00, /* add ip, ip, #NN */
1934 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1935 0x00000000, /* unused */
1940 /* The first entry in a procedure linkage table looks like
1941 this. It is set up so that any shared library function that is
1942 called before the relocation has been set up calls the dynamic
1943 linker first.  */
1944 static const bfd_vma elf32_arm_plt0_entry [] =
1946 0xe52de004, /* str lr, [sp, #-4]! */
1947 0xe59fe004, /* ldr lr, [pc, #4] */
1948 0xe08fe00e, /* add lr, pc, lr */
1949 0xe5bef008, /* ldr pc, [lr, #8]! */
1950 0x00000000, /* &GOT[0] - . */
1953 /* Subsequent entries in a procedure linkage table look like
1954 this.  */
1955 static const bfd_vma elf32_arm_plt_entry [] =
1957 0xe28fc600, /* add ip, pc, #0xNN00000 */
1958 0xe28cca00, /* add ip, ip, #0xNN000 */
1959 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
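/* How the three-instruction entry reaches its GOT slot (an illustrative
   sketch of how the immediates get filled in, not code from this file):
   the linker takes the displacement from the entry's PC (which reads as
   the first add's address + 8) to the slot and splits it into an 8-bit
   chunk shifted left by 20 (#0xNN00000), an 8-bit chunk shifted left by
   12 (#0xNN000) and a low 12-bit chunk (#0xNNN), patching them into the
   two ADDs and the LDR.  The writeback on "ldr pc, [ip, #0xNNN]!" leaves
   the slot's address in ip for the lazy-resolution path.  */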
1964 /* The format of the first entry in the procedure linkage table
1965 for a VxWorks executable. */
1966 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1968 0xe52dc008, /* str ip,[sp,#-8]! */
1969 0xe59fc000, /* ldr ip,[pc] */
1970 0xe59cf008, /* ldr pc,[ip,#8] */
1971 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1974 /* The format of subsequent entries in a VxWorks executable. */
1975 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1977 0xe59fc000, /* ldr ip,[pc] */
1978 0xe59cf000, /* ldr pc,[ip] */
1979 0x00000000, /* .long @got */
1980 0xe59fc000, /* ldr ip,[pc] */
1981 0xea000000, /* b _PLT */
1982 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1985 /* The format of entries in a VxWorks shared library. */
1986 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1988 0xe59fc000, /* ldr ip,[pc] */
1989 0xe79cf009, /* ldr pc,[ip,r9] */
1990 0x00000000, /* .long @got */
1991 0xe59fc000, /* ldr ip,[pc] */
1992 0xe599f008, /* ldr pc,[r9,#8] */
1993 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1996 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1997 #define PLT_THUMB_STUB_SIZE 4
1998 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2004 /* The entries in a PLT when using a DLL-based target with multiple
2005 address spaces.  */
2006 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2008 0xe51ff004, /* ldr pc, [pc, #-4] */
2009 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2012 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2013 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2014 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2015 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2016 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2017 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
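/* These limits follow from the branch encodings: an ARM B/BL has a
   24-bit signed word offset (roughly +/-32MB) applied to PC+8, the
   original Thumb BL pair gives a 22-bit signed halfword offset (roughly
   +/-4MB) applied to PC+4, and the Thumb-2 B.W/BL encodings widen that
   to roughly +/-16MB.  For example, the largest forward ARM branch lands
   ((1 << 23) - 1) words beyond the PC value (the instruction address
   plus 8), which is exactly ARM_MAX_FWD_BRANCH_OFFSET above. */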
2019 enum stub_insn_type
2020 {
2021 THUMB16_TYPE = 1,
2022 THUMB32_TYPE,
2023 ARM_TYPE,
2024 DATA_TYPE
2025 };
2027 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2028 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2029 is inserted in arm_build_one_stub(). */
2030 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2031 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2032 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2033 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2034 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2035 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2037 typedef struct
2038 {
2039 bfd_vma data;
2040 enum stub_insn_type type;
2041 unsigned int r_type;
2042 int reloc_addend;
2043 } insn_sequence;
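/* Each long-branch or veneer stub below is described as an array of
   these entries: 16-bit Thumb, 32-bit Thumb-2 or ARM opcodes plus
   literal data words, optionally tagged with a relocation type and
   addend that arm_build_one_stub() resolves against the stub's final
   destination when the stub is written out. */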
2045 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2046 to reach the stub if necessary. */
2047 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2049 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2050 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2053 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2054 available. */
2055 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2057 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2058 ARM_INSN(0xe12fff1c), /* bx ip */
2059 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2062 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2063 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2065 THUMB16_INSN(0xb401), /* push {r0} */
2066 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2067 THUMB16_INSN(0x4684), /* mov ip, r0 */
2068 THUMB16_INSN(0xbc01), /* pop {r0} */
2069 THUMB16_INSN(0x4760), /* bx ip */
2070 THUMB16_INSN(0xbf00), /* nop */
2071 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2074 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2075 allowed. */
2076 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2078 THUMB16_INSN(0x4778), /* bx pc */
2079 THUMB16_INSN(0x46c0), /* nop */
2080 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2081 ARM_INSN(0xe12fff1c), /* bx ip */
2082 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2085 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2086 available. */
2087 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2089 THUMB16_INSN(0x4778), /* bx pc */
2090 THUMB16_INSN(0x46c0), /* nop */
2091 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2092 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2095 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2096 one, when the destination is close enough. */
2097 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2099 THUMB16_INSN(0x4778), /* bx pc */
2100 THUMB16_INSN(0x46c0), /* nop */
2101 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2102 };
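/* A note on the addends above and below: ARM reads PC as the address of
   the current instruction plus 8, so the -8 keeps the relocated branch
   pointing at X itself; likewise the R_ARM_REL32 literal words in the
   PIC stubs carry small addends (such as -4) so that the loaded value,
   added to the PC seen by the instruction that consumes it, comes out at
   exactly the destination. */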
2104 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2105 blx to reach the stub if necessary. */
2106 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2108 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2109 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2110 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2113 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2114 blx to reach the stub if necessary. We can not add into pc;
2115 it is not guaranteed to mode switch (different in ARMv6 and
2116 ARMv7). */
2117 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2119 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2120 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2121 ARM_INSN(0xe12fff1c), /* bx ip */
2122 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2125 /* V4T ARM -> Thumb long branch stub, PIC. */
2126 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2128 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2129 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2130 ARM_INSN(0xe12fff1c), /* bx ip */
2131 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2134 /* V4T Thumb -> ARM long branch stub, PIC. */
2135 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2137 THUMB16_INSN(0x4778), /* bx pc */
2138 THUMB16_INSN(0x46c0), /* nop */
2139 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2140 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2141 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2144 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2145 architectures. */
2146 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2148 THUMB16_INSN(0xb401), /* push {r0} */
2149 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2150 THUMB16_INSN(0x46fc), /* mov ip, pc */
2151 THUMB16_INSN(0x4484), /* add ip, r0 */
2152 THUMB16_INSN(0xbc01), /* pop {r0} */
2153 THUMB16_INSN(0x4760), /* bx ip */
2154 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X+4) */
2157 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2158 allowed. */
2159 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2161 THUMB16_INSN(0x4778), /* bx pc */
2162 THUMB16_INSN(0x46c0), /* nop */
2163 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2164 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2165 ARM_INSN(0xe12fff1c), /* bx ip */
2166 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2169 /* Cortex-A8 erratum-workaround stubs. */
2171 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2172 can't use a conditional branch to reach this stub). */
2174 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2176 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2177 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2178 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2181 /* Stub used for b.w and bl.w instructions. */
2183 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2185 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2188 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2190 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2193 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2194 instruction (which switches to ARM mode) to point to this stub. Jump to the
2195 real destination using an ARM-mode branch. */
2197 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2199 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2200 };
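/* These veneers deal with the Cortex-A8 erratum affecting 32-bit Thumb-2
   branches whose two halfwords straddle a 4KB boundary: the offending
   branch is redirected to one of the stubs above, which then continues
   to the original destination (see cortex_a8_erratum_scan below for how
   such branches are located). */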
2202 /* Section name for stubs is the associated section name plus this
2203 string. */
2204 #define STUB_SUFFIX ".stub"
2206 /* One entry per long/short branch stub defined above. */
2207 #define DEF_STUBS \
2208 DEF_STUB(long_branch_any_any) \
2209 DEF_STUB(long_branch_v4t_arm_thumb) \
2210 DEF_STUB(long_branch_thumb_only) \
2211 DEF_STUB(long_branch_v4t_thumb_thumb) \
2212 DEF_STUB(long_branch_v4t_thumb_arm) \
2213 DEF_STUB(short_branch_v4t_thumb_arm) \
2214 DEF_STUB(long_branch_any_arm_pic) \
2215 DEF_STUB(long_branch_any_thumb_pic) \
2216 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2219 DEF_STUB(long_branch_thumb_only_pic) \
2220 DEF_STUB(a8_veneer_b_cond) \
2221 DEF_STUB(a8_veneer_b) \
2222 DEF_STUB(a8_veneer_bl) \
2223 DEF_STUB(a8_veneer_blx)
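/* DEF_STUBS is expanded twice with different definitions of DEF_STUB:
   once just below to build the elf32_arm_stub_type enumeration, and once
   further down to build the parallel stub_definitions[] table, so each
   enum value indexes its own template. */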
2225 #define DEF_STUB(x) arm_stub_##x,
2226 enum elf32_arm_stub_type {
2227 arm_stub_none,
2228 DEF_STUBS
2229 /* Note the first a8_veneer type */
2230 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2231 };
2232 #undef DEF_STUB
2234 typedef struct
2235 {
2236 const insn_sequence* template_sequence;
2237 int template_size;
2238 } stub_def;
2240 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2241 static const stub_def stub_definitions[] = {
2242 {NULL, 0},
2243 DEF_STUBS
2244 };
2246 struct elf32_arm_stub_hash_entry
2248 /* Base hash table entry structure. */
2249 struct bfd_hash_entry root;
2251 /* The stub section. */
2252 asection *stub_sec;
2254 /* Offset within stub_sec of the beginning of this stub. */
2255 bfd_vma stub_offset;
2257 /* Given the symbol's value and its section we can determine its final
2258 value when building the stubs (so the stub knows where to jump). */
2259 bfd_vma target_value;
2260 asection *target_section;
2262 /* Offset to apply to relocation referencing target_value. */
2263 bfd_vma target_addend;
2265 /* The instruction which caused this stub to be generated (only valid for
2266 Cortex-A8 erratum workaround stubs at present). */
2267 unsigned long orig_insn;
2269 /* The stub type. */
2270 enum elf32_arm_stub_type stub_type;
2271 /* Its encoding size in bytes. */
2272 int stub_size;
2274 const insn_sequence *stub_template;
2275 /* The size of the template (number of entries). */
2276 int stub_template_size;
2278 /* The symbol table entry, if any, that this was derived from. */
2279 struct elf32_arm_link_hash_entry *h;
2281 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2282 unsigned char st_type;
2284 /* Where this stub is being called from, or, in the case of combined
2285 stub sections, the first input section in the group. */
2286 asection *id_sec;
2288 /* The name for the local symbol at the start of this stub. The
2289 stub name in the hash table has to be unique; this does not, so
2290 it can be friendlier. */
2291 char *output_name;
2294 /* Used to build a map of a section. This is required for mixed-endian
2295 code/data. */
2297 typedef struct elf32_elf_section_map
2298 {
2299 bfd_vma vma;
2300 char type;
2301 }
2302 elf32_arm_section_map;
2304 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2308 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2309 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2310 VFP11_ERRATUM_ARM_VENEER,
2311 VFP11_ERRATUM_THUMB_VENEER
2313 elf32_vfp11_erratum_type;
2315 typedef struct elf32_vfp11_erratum_list
2317 struct elf32_vfp11_erratum_list *next;
2323 struct elf32_vfp11_erratum_list *veneer;
2324 unsigned int vfp_insn;
2328 struct elf32_vfp11_erratum_list *branch;
2332 elf32_vfp11_erratum_type type;
2334 elf32_vfp11_erratum_list;
2339 INSERT_EXIDX_CANTUNWIND_AT_END
2341 arm_unwind_edit_type;
2343 /* A (sorted) list of edits to apply to an unwind table. */
2344 typedef struct arm_unwind_table_edit
2346 arm_unwind_edit_type type;
2347 /* Note: we sometimes want to insert an unwind entry corresponding to a
2348 section different from the one we're currently writing out, so record the
2349 (text) section this edit relates to here. */
2350 asection *linked_section;
2352 struct arm_unwind_table_edit *next;
2354 arm_unwind_table_edit;
2356 typedef struct _arm_elf_section_data
2358 /* Information about mapping symbols. */
2359 struct bfd_elf_section_data elf;
2360 unsigned int mapcount;
2361 unsigned int mapsize;
2362 elf32_arm_section_map *map;
2363 /* Information about CPU errata. */
2364 unsigned int erratumcount;
2365 elf32_vfp11_erratum_list *erratumlist;
2366 /* Information about unwind tables. */
2369 /* Unwind info attached to a text section. */
2372 asection *arm_exidx_sec;
2375 /* Unwind info attached to an .ARM.exidx section. */
2378 arm_unwind_table_edit *unwind_edit_list;
2379 arm_unwind_table_edit *unwind_edit_tail;
2383 _arm_elf_section_data;
2385 #define elf32_arm_section_data(sec) \
2386 ((_arm_elf_section_data *) elf_section_data (sec))
2388 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2389 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2390 so may be created multiple times: we use an array of these entries whilst
2391 relaxing which we can refresh easily, then create stubs for each potentially
2392 erratum-triggering instruction once we've settled on a solution. */
2394 struct a8_erratum_fix {
2395 bfd *input_bfd;
2396 asection *section;
2397 bfd_vma offset;
2398 bfd_vma addend;
2399 unsigned long orig_insn;
2400 char *stub_name;
2401 enum elf32_arm_stub_type stub_type;
2402 };
2404 /* A table of relocs applied to branches which might trigger Cortex-A8
2407 struct a8_erratum_reloc {
2408 bfd_vma from;
2409 bfd_vma destination;
2410 unsigned int r_type;
2411 unsigned char st_type;
2412 const char *sym_name;
2413 bfd_boolean non_a8_stub;
2414 };
2416 /* The size of the thread control block. */
2417 #define TCB_SIZE 8
2419 struct elf_arm_obj_tdata
2421 struct elf_obj_tdata root;
2423 /* tls_type for each local got entry. */
2424 char *local_got_tls_type;
2426 /* Zero to warn when linking objects with incompatible enum sizes. */
2427 int no_enum_size_warning;
2429 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2430 int no_wchar_size_warning;
2433 #define elf_arm_tdata(bfd) \
2434 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2436 #define elf32_arm_local_got_tls_type(bfd) \
2437 (elf_arm_tdata (bfd)->local_got_tls_type)
2439 #define is_arm_elf(bfd) \
2440 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2441 && elf_tdata (bfd) != NULL \
2442 && elf_object_id (bfd) == ARM_ELF_TDATA)
2444 static bfd_boolean
2445 elf32_arm_mkobject (bfd *abfd)
2446 {
2447 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2448 ARM_ELF_TDATA);
2449 }
2451 /* The ARM linker needs to keep track of the number of relocs that it
2452 decides to copy in check_relocs for each symbol. This is so that
2453 it can discard PC relative relocs if it doesn't need them when
2454 linking with -Bsymbolic. We store the information in a field
2455 extending the regular ELF linker hash table. */
2457 /* This structure keeps track of the number of relocs we have copied
2458 for a given symbol. */
2459 struct elf32_arm_relocs_copied
2462 struct elf32_arm_relocs_copied * next;
2463 /* A section in dynobj. */
2464 asection * section;
2465 /* Number of relocs copied in this section. */
2466 bfd_size_type count;
2467 /* Number of PC-relative relocs copied in this section. */
2468 bfd_size_type pc_count;
2471 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2473 /* Arm ELF linker hash entry. */
2474 struct elf32_arm_link_hash_entry
2476 struct elf_link_hash_entry root;
2478 /* Number of PC relative relocs copied for this symbol. */
2479 struct elf32_arm_relocs_copied * relocs_copied;
2481 /* We reference count Thumb references to a PLT entry separately,
2482 so that we can emit the Thumb trampoline only if needed. */
2483 bfd_signed_vma plt_thumb_refcount;
2485 /* Some references from Thumb code may be eliminated by BL->BLX
2486 conversion, so record them separately. */
2487 bfd_signed_vma plt_maybe_thumb_refcount;
2489 /* Since PLT entries have variable size if the Thumb prologue is
2490 used, we need to record the index into .got.plt instead of
2491 recomputing it from the PLT offset. */
2492 bfd_signed_vma plt_got_offset;
2494 #define GOT_UNKNOWN 0
2495 #define GOT_NORMAL 1
2496 #define GOT_TLS_GD 2
2497 #define GOT_TLS_IE 4
2498 unsigned char tls_type;
2500 /* The symbol marking the real symbol location for exported thumb
2501 symbols with Arm stubs. */
2502 struct elf_link_hash_entry *export_glue;
2504 /* A pointer to the most recently used stub hash entry against this
2505 symbol. */
2506 struct elf32_arm_stub_hash_entry *stub_cache;
2509 /* Traverse an arm ELF linker hash table. */
2510 #define elf32_arm_link_hash_traverse(table, func, info) \
2511 (elf_link_hash_traverse \
2513 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2516 /* Get the ARM elf linker hash table from a link_info structure. */
2517 #define elf32_arm_hash_table(info) \
2518 ((struct elf32_arm_link_hash_table *) ((info)->hash))
2520 #define arm_stub_hash_lookup(table, string, create, copy) \
2521 ((struct elf32_arm_stub_hash_entry *) \
2522 bfd_hash_lookup ((table), (string), (create), (copy)))
2524 /* Array to keep track of which stub sections have been created, and
2525 information on stub grouping. */
2526 struct map_stub
2527 {
2528 /* This is the section to which stubs in the group will be
2529 attached. */
2530 asection *link_sec;
2531 /* The stub section. */
2532 asection *stub_sec;
2533 };
2535 /* ARM ELF linker hash table. */
2536 struct elf32_arm_link_hash_table
2538 /* The main hash table. */
2539 struct elf_link_hash_table root;
2541 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2542 bfd_size_type thumb_glue_size;
2544 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2545 bfd_size_type arm_glue_size;
2547 /* The size in bytes of section containing the ARMv4 BX veneers. */
2548 bfd_size_type bx_glue_size;
2550 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2551 veneer has been populated. */
2552 bfd_vma bx_glue_offset[15];
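/* One slot per register r0..r14, matching the "__bx_r%d" veneer names
   above; since bits 0 and 1 are used as flags, the offsets recorded here
   are necessarily multiples of four. */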
2554 /* The size in bytes of the section containing glue for VFP11 erratum
2556 bfd_size_type vfp11_erratum_glue_size;
2558 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2559 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2560 elf32_arm_write_section(). */
2561 struct a8_erratum_fix *a8_erratum_fixes;
2562 unsigned int num_a8_erratum_fixes;
2564 /* An arbitrary input BFD chosen to hold the glue sections. */
2565 bfd * bfd_of_glue_owner;
2567 /* Nonzero to output a BE8 image. */
2568 int byteswap_code;
2570 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2571 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2572 int target1_is_rel;
2574 /* The relocation to use for R_ARM_TARGET2 relocations. */
2575 int target2_reloc;
2577 /* 0 = Ignore R_ARM_V4BX.
2578 1 = Convert BX to MOV PC.
2579 2 = Generate v4 interworking stubs. */
2580 int fix_v4bx;
2582 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2583 int fix_cortex_a8;
2585 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2586 int use_blx;
2588 /* What sort of code sequences we should look for which may trigger the
2589 VFP11 denorm erratum. */
2590 bfd_arm_vfp11_fix vfp11_fix;
2592 /* Global counter for the number of fixes we have emitted. */
2593 int num_vfp11_fixes;
2595 /* Nonzero to force PIC branch veneers. */
2596 int pic_veneer;
2598 /* The number of bytes in the initial entry in the PLT. */
2599 bfd_size_type plt_header_size;
2601 /* The number of bytes in the subsequent PLT entries. */
2602 bfd_size_type plt_entry_size;
2604 /* True if the target system is VxWorks. */
2605 int vxworks_p;
2607 /* True if the target system is Symbian OS. */
2608 int symbian_p;
2610 /* True if the target uses REL relocations. */
2611 int use_rel;
2613 /* Short-cuts to get to dynamic linker sections. */
2614 asection *sgot;
2615 asection *sgotplt;
2616 asection *srelgot;
2617 asection *splt;
2618 asection *srelplt;
2619 asection *sdynbss;
2620 asection *srelbss;
2622 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2623 asection *srelplt2;
2625 /* Data for R_ARM_TLS_LDM32 relocations. */
2626 union
2627 {
2628 bfd_signed_vma refcount;
2629 bfd_vma offset;
2630 } tls_ldm_got;
2632 /* Small local sym cache. */
2633 struct sym_cache sym_cache;
2635 /* For convenience in allocate_dynrelocs. */
2636 bfd *obfd;
2638 /* The stub hash table. */
2639 struct bfd_hash_table stub_hash_table;
2641 /* Linker stub bfd. */
2642 bfd *stub_bfd;
2644 /* Linker call-backs. */
2645 asection * (*add_stub_section) (const char *, asection *);
2646 void (*layout_sections_again) (void);
2648 /* Array to keep track of which stub sections have been created, and
2649 information on stub grouping. */
2650 struct map_stub *stub_group;
2652 /* Assorted information used by elf32_arm_size_stubs. */
2653 unsigned int bfd_count;
2654 int top_index;
2655 asection **input_list;
2656 };
2658 /* Create an entry in an ARM ELF linker hash table. */
2660 static struct bfd_hash_entry *
2661 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2662 struct bfd_hash_table * table,
2663 const char * string)
2665 struct elf32_arm_link_hash_entry * ret =
2666 (struct elf32_arm_link_hash_entry *) entry;
2668 /* Allocate the structure if it has not already been allocated by a
2671 ret = (struct elf32_arm_link_hash_entry *)
2672 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2674 return (struct bfd_hash_entry *) ret;
2676 /* Call the allocation method of the superclass. */
2677 ret = ((struct elf32_arm_link_hash_entry *)
2678 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2682 ret->relocs_copied = NULL;
2683 ret->tls_type = GOT_UNKNOWN;
2684 ret->plt_thumb_refcount = 0;
2685 ret->plt_maybe_thumb_refcount = 0;
2686 ret->plt_got_offset = -1;
2687 ret->export_glue = NULL;
2689 ret->stub_cache = NULL;
2692 return (struct bfd_hash_entry *) ret;
2695 /* Initialize an entry in the stub hash table. */
2697 static struct bfd_hash_entry *
2698 stub_hash_newfunc (struct bfd_hash_entry *entry,
2699 struct bfd_hash_table *table,
2702 /* Allocate the structure if it has not already been allocated by a
2706 entry = (struct bfd_hash_entry *)
2707 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2712 /* Call the allocation method of the superclass. */
2713 entry = bfd_hash_newfunc (entry, table, string);
2716 struct elf32_arm_stub_hash_entry *eh;
2718 /* Initialize the local fields. */
2719 eh = (struct elf32_arm_stub_hash_entry *) entry;
2720 eh->stub_sec = NULL;
2721 eh->stub_offset = 0;
2722 eh->target_value = 0;
2723 eh->target_section = NULL;
2724 eh->target_addend = 0;
2726 eh->stub_type = arm_stub_none;
2728 eh->stub_template = NULL;
2729 eh->stub_template_size = 0;
2732 eh->output_name = NULL;
2738 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2739 shortcuts to them in our hash table. */
2742 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2744 struct elf32_arm_link_hash_table *htab;
2746 htab = elf32_arm_hash_table (info);
2747 /* BPABI objects never have a GOT, or associated sections. */
2748 if (htab->symbian_p)
2751 if (! _bfd_elf_create_got_section (dynobj, info))
2754 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2755 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2756 if (!htab->sgot || !htab->sgotplt)
2759 htab->srelgot = bfd_get_section_by_name (dynobj,
2760 RELOC_SECTION (htab, ".got"));
2761 if (htab->srelgot == NULL)
2766 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2767 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2771 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2773 struct elf32_arm_link_hash_table *htab;
2775 htab = elf32_arm_hash_table (info);
2776 if (!htab->sgot && !create_got_section (dynobj, info))
2779 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2782 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2783 htab->srelplt = bfd_get_section_by_name (dynobj,
2784 RELOC_SECTION (htab, ".plt"));
2785 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2787 htab->srelbss = bfd_get_section_by_name (dynobj,
2788 RELOC_SECTION (htab, ".bss"));
2790 if (htab->vxworks_p)
2792 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2797 htab->plt_header_size = 0;
2798 htab->plt_entry_size
2799 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2803 htab->plt_header_size
2804 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2805 htab->plt_entry_size
2806 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2813 || (!info->shared && !htab->srelbss))
2819 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2822 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2823 struct elf_link_hash_entry *dir,
2824 struct elf_link_hash_entry *ind)
2826 struct elf32_arm_link_hash_entry *edir, *eind;
2828 edir = (struct elf32_arm_link_hash_entry *) dir;
2829 eind = (struct elf32_arm_link_hash_entry *) ind;
2831 if (eind->relocs_copied != NULL)
2833 if (edir->relocs_copied != NULL)
2835 struct elf32_arm_relocs_copied **pp;
2836 struct elf32_arm_relocs_copied *p;
2838 /* Add reloc counts against the indirect sym to the direct sym
2839 list. Merge any entries against the same section. */
2840 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2842 struct elf32_arm_relocs_copied *q;
2844 for (q = edir->relocs_copied; q != NULL; q = q->next)
2845 if (q->section == p->section)
2847 q->pc_count += p->pc_count;
2848 q->count += p->count;
2855 *pp = edir->relocs_copied;
2858 edir->relocs_copied = eind->relocs_copied;
2859 eind->relocs_copied = NULL;
2862 if (ind->root.type == bfd_link_hash_indirect)
2864 /* Copy over PLT info. */
2865 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2866 eind->plt_thumb_refcount = 0;
2867 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2868 eind->plt_maybe_thumb_refcount = 0;
2870 if (dir->got.refcount <= 0)
2872 edir->tls_type = eind->tls_type;
2873 eind->tls_type = GOT_UNKNOWN;
2877 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2880 /* Create an ARM elf linker hash table. */
2882 static struct bfd_link_hash_table *
2883 elf32_arm_link_hash_table_create (bfd *abfd)
2885 struct elf32_arm_link_hash_table *ret;
2886 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2888 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2892 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2893 elf32_arm_link_hash_newfunc,
2894 sizeof (struct elf32_arm_link_hash_entry)))
2901 ret->sgotplt = NULL;
2902 ret->srelgot = NULL;
2904 ret->srelplt = NULL;
2905 ret->sdynbss = NULL;
2906 ret->srelbss = NULL;
2907 ret->srelplt2 = NULL;
2908 ret->thumb_glue_size = 0;
2909 ret->arm_glue_size = 0;
2910 ret->bx_glue_size = 0;
2911 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2912 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2913 ret->vfp11_erratum_glue_size = 0;
2914 ret->num_vfp11_fixes = 0;
2915 ret->fix_cortex_a8 = 0;
2916 ret->bfd_of_glue_owner = NULL;
2917 ret->byteswap_code = 0;
2918 ret->target1_is_rel = 0;
2919 ret->target2_reloc = R_ARM_NONE;
2920 #ifdef FOUR_WORD_PLT
2921 ret->plt_header_size = 16;
2922 ret->plt_entry_size = 16;
2924 ret->plt_header_size = 20;
2925 ret->plt_entry_size = 12;
2932 ret->sym_cache.abfd = NULL;
2934 ret->tls_ldm_got.refcount = 0;
2935 ret->stub_bfd = NULL;
2936 ret->add_stub_section = NULL;
2937 ret->layout_sections_again = NULL;
2938 ret->stub_group = NULL;
2941 ret->input_list = NULL;
2943 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2944 sizeof (struct elf32_arm_stub_hash_entry)))
2950 return &ret->root.root;
2953 /* Free the derived linker hash table. */
2956 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2958 struct elf32_arm_link_hash_table *ret
2959 = (struct elf32_arm_link_hash_table *) hash;
2961 bfd_hash_table_free (&ret->stub_hash_table);
2962 _bfd_generic_link_hash_table_free (hash);
2965 /* Determine if we're dealing with a Thumb only architecture. */
2968 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2970 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2974 if (arch != TAG_CPU_ARCH_V7)
2977 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2978 Tag_CPU_arch_profile);
2980 return profile == 'M';
2983 /* Determine if we're dealing with a Thumb-2 object. */
2986 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2988 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2990 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2993 /* Determine what kind of NOPs are available. */
2996 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
2998 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3000 return arch == TAG_CPU_ARCH_V6T2
3001 || arch == TAG_CPU_ARCH_V6K
3002 || arch == TAG_CPU_ARCH_V7;
3006 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3008 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3010 return arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7;
3014 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3018 case arm_stub_long_branch_thumb_only:
3019 case arm_stub_long_branch_v4t_thumb_arm:
3020 case arm_stub_short_branch_v4t_thumb_arm:
3021 case arm_stub_long_branch_v4t_thumb_arm_pic:
3022 case arm_stub_long_branch_thumb_only_pic:
3033 /* Determine the type of stub needed, if any, for a call. */
3035 static enum elf32_arm_stub_type
3036 arm_type_of_stub (struct bfd_link_info *info,
3037 asection *input_sec,
3038 const Elf_Internal_Rela *rel,
3039 unsigned char st_type,
3040 struct elf32_arm_link_hash_entry *hash,
3041 bfd_vma destination,
3047 bfd_signed_vma branch_offset;
3048 unsigned int r_type;
3049 struct elf32_arm_link_hash_table * globals;
3052 enum elf32_arm_stub_type stub_type = arm_stub_none;
3055 /* We don't know the actual type of destination in case it is of
3056 type STT_SECTION: give up. */
3057 if (st_type == STT_SECTION)
3060 globals = elf32_arm_hash_table (info);
3062 thumb_only = using_thumb_only (globals);
3064 thumb2 = using_thumb2 (globals);
3066 /* Determine where the call point is. */
3067 location = (input_sec->output_offset
3068 + input_sec->output_section->vma
3071 branch_offset = (bfd_signed_vma)(destination - location);
3073 r_type = ELF32_R_TYPE (rel->r_info);
3075 /* Keep a simpler condition, for the sake of clarity. */
3076 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3079 /* Note when dealing with PLT entries: the main PLT stub is in
3080 ARM mode, so if the branch is in Thumb mode, another
3081 Thumb->ARM stub will be inserted later just before the ARM
3082 PLT stub. We don't take this extra distance into account
3083 here, because if a long branch stub is needed, we'll add a
3084 Thumb->Arm one and branch directly to the ARM PLT entry
3085 because it avoids spreading offset corrections in several
3086 places. */
3089 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3091 /* Handle cases where:
3092 - this call goes too far (different Thumb/Thumb2 max
3094 - it's a Thumb->Arm call and blx is not available, or it's a
3095 Thumb->Arm branch (not bl). A stub is needed in this case,
3096 but only if this call is not through a PLT entry. Indeed,
3097 PLT stubs handle mode switching already.
3100 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3101 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3103 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3104 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3105 || ((st_type != STT_ARM_TFUNC)
3106 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3107 || (r_type == R_ARM_THM_JUMP24))
3110 if (st_type == STT_ARM_TFUNC)
3112 /* Thumb to thumb. */
3115 stub_type = (info->shared | globals->pic_veneer)
3117 ? ((globals->use_blx
3118 && (r_type == R_ARM_THM_CALL))
3119 /* V5T and above. Stub starts with ARM code, so
3120 we must be able to switch mode before
3121 reaching it, which is only possible for 'bl'
3122 (ie R_ARM_THM_CALL relocation). */
3123 ? arm_stub_long_branch_any_thumb_pic
3124 /* On V4T, use Thumb code only. */
3125 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3127 /* non-PIC stubs. */
3128 : ((globals->use_blx
3129 && (r_type == R_ARM_THM_CALL))
3130 /* V5T and above. */
3131 ? arm_stub_long_branch_any_any
3133 : arm_stub_long_branch_v4t_thumb_thumb);
3137 stub_type = (info->shared | globals->pic_veneer)
3139 ? arm_stub_long_branch_thumb_only_pic
3141 : arm_stub_long_branch_thumb_only;
3148 && sym_sec->owner != NULL
3149 && !INTERWORK_FLAG (sym_sec->owner))
3151 (*_bfd_error_handler)
3152 (_("%B(%s): warning: interworking not enabled.\n"
3153 " first occurrence: %B: Thumb call to ARM"),
3154 sym_sec->owner, input_bfd, name);
3157 stub_type = (info->shared | globals->pic_veneer)
3159 ? ((globals->use_blx
3160 && (r_type == R_ARM_THM_CALL))
3161 /* V5T and above. */
3162 ? arm_stub_long_branch_any_arm_pic
3164 : arm_stub_long_branch_v4t_thumb_arm_pic)
3166 /* non-PIC stubs. */
3167 : ((globals->use_blx
3168 && (r_type == R_ARM_THM_CALL))
3169 /* V5T and above. */
3170 ? arm_stub_long_branch_any_any
3172 : arm_stub_long_branch_v4t_thumb_arm);
3174 /* Handle v4t short branches. */
3175 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3176 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3177 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3178 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3182 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3184 if (st_type == STT_ARM_TFUNC)
3189 && sym_sec->owner != NULL
3190 && !INTERWORK_FLAG (sym_sec->owner))
3192 (*_bfd_error_handler)
3193 (_("%B(%s): warning: interworking not enabled.\n"
3194 " first occurrence: %B: ARM call to Thumb"),
3195 sym_sec->owner, input_bfd, name);
3198 /* We have an extra 2-bytes reach because of
3199 the mode change (bit 24 (H) of BLX encoding). */
3200 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3201 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3202 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3203 || (r_type == R_ARM_JUMP24)
3204 || (r_type == R_ARM_PLT32))
3206 stub_type = (info->shared | globals->pic_veneer)
3208 ? ((globals->use_blx)
3209 /* V5T and above. */
3210 ? arm_stub_long_branch_any_thumb_pic
3212 : arm_stub_long_branch_v4t_arm_thumb_pic)
3214 /* non-PIC stubs. */
3215 : ((globals->use_blx)
3216 /* V5T and above. */
3217 ? arm_stub_long_branch_any_any
3219 : arm_stub_long_branch_v4t_arm_thumb);
3225 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3226 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3228 stub_type = (info->shared | globals->pic_veneer)
3230 ? arm_stub_long_branch_any_arm_pic
3231 /* non-PIC stubs. */
3232 : arm_stub_long_branch_any_any;
3240 /* Build a name for an entry in the stub hash table. */
3243 elf32_arm_stub_name (const asection *input_section,
3244 const asection *sym_sec,
3245 const struct elf32_arm_link_hash_entry *hash,
3246 const Elf_Internal_Rela *rel)
3253 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3254 stub_name = (char *) bfd_malloc (len);
3255 if (stub_name != NULL)
3256 sprintf (stub_name, "%08x_%s+%x",
3257 input_section->id & 0xffffffff,
3258 hash->root.root.root.string,
3259 (int) rel->r_addend & 0xffffffff);
3263 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3264 stub_name = (char *) bfd_malloc (len);
3265 if (stub_name != NULL)
3266 sprintf (stub_name, "%08x_%x:%x+%x",
3267 input_section->id & 0xffffffff,
3268 sym_sec->id & 0xffffffff,
3269 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3270 (int) rel->r_addend & 0xffffffff);
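/* For example (illustrative values only): a call to printf from the
   section with id 0x2a and addend 0 yields "0000002a_printf+0", while a
   local symbol produces something like "0000002a_1f:4+0", i.e. section
   id, symbol-section id, symbol index and addend. */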
3276 /* Look up an entry in the stub hash. Stub entries are cached because
3277 creating the stub name takes a bit of time. */
3279 static struct elf32_arm_stub_hash_entry *
3280 elf32_arm_get_stub_entry (const asection *input_section,
3281 const asection *sym_sec,
3282 struct elf_link_hash_entry *hash,
3283 const Elf_Internal_Rela *rel,
3284 struct elf32_arm_link_hash_table *htab)
3286 struct elf32_arm_stub_hash_entry *stub_entry;
3287 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3288 const asection *id_sec;
3290 if ((input_section->flags & SEC_CODE) == 0)
3293 /* If this input section is part of a group of sections sharing one
3294 stub section, then use the id of the first section in the group.
3295 Stub names need to include a section id, as there may well be
3296 more than one stub used to reach say, printf, and we need to
3297 distinguish between them. */
3298 id_sec = htab->stub_group[input_section->id].link_sec;
3300 if (h != NULL && h->stub_cache != NULL
3301 && h->stub_cache->h == h
3302 && h->stub_cache->id_sec == id_sec)
3304 stub_entry = h->stub_cache;
3310 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3311 if (stub_name == NULL)
3314 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3315 stub_name, FALSE, FALSE);
3317 h->stub_cache = stub_entry;
3325 /* Find or create a stub section. Returns a pointer to the stub section, and
3326 the section to which the stub section will be attached (in *LINK_SEC_P).
3327 LINK_SEC_P may be NULL. */
3330 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3331 struct elf32_arm_link_hash_table *htab)
3336 link_sec = htab->stub_group[section->id].link_sec;
3337 stub_sec = htab->stub_group[section->id].stub_sec;
3338 if (stub_sec == NULL)
3340 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3341 if (stub_sec == NULL)
3347 namelen = strlen (link_sec->name);
3348 len = namelen + sizeof (STUB_SUFFIX);
3349 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3353 memcpy (s_name, link_sec->name, namelen);
3354 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3355 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3356 if (stub_sec == NULL)
3358 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3360 htab->stub_group[section->id].stub_sec = stub_sec;
3364 *link_sec_p = link_sec;
3369 /* Add a new stub entry to the stub hash. Not all fields of the new
3370 stub entry are initialised. */
3372 static struct elf32_arm_stub_hash_entry *
3373 elf32_arm_add_stub (const char *stub_name,
3375 struct elf32_arm_link_hash_table *htab)
3379 struct elf32_arm_stub_hash_entry *stub_entry;
3381 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3382 if (stub_sec == NULL)
3385 /* Enter this entry into the linker stub hash table. */
3386 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3388 if (stub_entry == NULL)
3390 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3396 stub_entry->stub_sec = stub_sec;
3397 stub_entry->stub_offset = 0;
3398 stub_entry->id_sec = link_sec;
3403 /* Store an Arm insn into an output section not processed by
3404 elf32_arm_write_section. */
3407 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3408 bfd * output_bfd, bfd_vma val, void * ptr)
3410 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3411 bfd_putl32 (val, ptr);
3413 bfd_putb32 (val, ptr);
3416 /* Store a 16-bit Thumb insn into an output section not processed by
3417 elf32_arm_write_section. */
3420 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3421 bfd * output_bfd, bfd_vma val, void * ptr)
3423 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3424 bfd_putl16 (val, ptr);
3426 bfd_putb16 (val, ptr);
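/* In a BE8 image the instructions are stored little-endian even though
   data remains big-endian, so when byteswap_code is set these helpers
   deliberately emit opcodes with the opposite endianness from the output
   BFD's default ordering. */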
3429 static bfd_reloc_status_type elf32_arm_final_link_relocate
3430 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3431 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3432 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3435 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3439 struct elf32_arm_stub_hash_entry *stub_entry;
3440 struct bfd_link_info *info;
3441 struct elf32_arm_link_hash_table *htab;
3449 const insn_sequence *template_sequence;
3451 struct elf32_arm_link_hash_table * globals;
3452 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3453 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3456 /* Massage our args to the form they really have. */
3457 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3458 info = (struct bfd_link_info *) in_arg;
3460 globals = elf32_arm_hash_table (info);
3462 htab = elf32_arm_hash_table (info);
3463 stub_sec = stub_entry->stub_sec;
3465 if ((htab->fix_cortex_a8 < 0)
3466 != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm))
3467 /* We have to do the a8 fixes last, as they are less aligned than
3468 the other veneers. */
3471 /* Make a note of the offset within the stubs for this entry. */
3472 stub_entry->stub_offset = stub_sec->size;
3473 loc = stub_sec->contents + stub_entry->stub_offset;
3475 stub_bfd = stub_sec->owner;
3477 /* This is the address of the start of the stub. */
3478 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3479 + stub_entry->stub_offset;
3481 /* This is the address of the stub destination. */
3482 sym_value = (stub_entry->target_value
3483 + stub_entry->target_section->output_offset
3484 + stub_entry->target_section->output_section->vma);
3486 template_sequence = stub_entry->stub_template;
3487 template_size = stub_entry->stub_template_size;
3490 for (i = 0; i < template_size; i++)
3492 switch (template_sequence[i].type)
3496 bfd_vma data = (bfd_vma) template_sequence[i].data;
3497 if (template_sequence[i].reloc_addend != 0)
3499 /* We've borrowed the reloc_addend field to mean we should
3500 insert a condition code into this (Thumb-1 branch)
3501 instruction. See THUMB16_BCOND_INSN. */
3502 BFD_ASSERT ((data & 0xff00) == 0xd000);
3503 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3505 put_thumb_insn (globals, stub_bfd, data, loc + size);
3511 put_thumb_insn (globals, stub_bfd,
3512 (template_sequence[i].data >> 16) & 0xffff,
3514 put_thumb_insn (globals, stub_bfd, template_sequence[i].data & 0xffff,
3516 if (template_sequence[i].r_type != R_ARM_NONE)
3518 stub_reloc_idx[nrelocs] = i;
3519 stub_reloc_offset[nrelocs++] = size;
3525 put_arm_insn (globals, stub_bfd, template_sequence[i].data,
3527 /* Handle cases where the target is encoded within the
3529 if (template_sequence[i].r_type == R_ARM_JUMP24)
3531 stub_reloc_idx[nrelocs] = i;
3532 stub_reloc_offset[nrelocs++] = size;
3538 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3539 stub_reloc_idx[nrelocs] = i;
3540 stub_reloc_offset[nrelocs++] = size;
3550 stub_sec->size += size;
3552 /* Stub size has already been computed in arm_size_one_stub. Check
3553 consistency. */
3554 BFD_ASSERT (size == stub_entry->stub_size);
3556 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3557 if (stub_entry->st_type == STT_ARM_TFUNC)
3558 sym_value |= 1;
3560 /* Assume there is at least one and at most MAXRELOCS entries to relocate
3561 in each stub. */
3562 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3564 for (i = 0; i < nrelocs; i++)
3565 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3566 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3567 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3568 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3570 Elf_Internal_Rela rel;
3571 bfd_boolean unresolved_reloc;
3572 char *error_message;
3574 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3575 ? STT_ARM_TFUNC : 0;
3576 bfd_vma points_to = sym_value + stub_entry->target_addend;
3578 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3579 rel.r_info = ELF32_R_INFO (0,
3580 template_sequence[stub_reloc_idx[i]].r_type);
3581 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3583 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3584 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3585 template should refer back to the instruction after the original
3586 branch. */
3587 points_to = sym_value;
3589 /* There may be unintended consequences if this is not true. */
3590 BFD_ASSERT (stub_entry->h == NULL);
3592 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3593 properly. We should probably use this function unconditionally,
3594 rather than only for certain relocations listed in the enclosing
3595 conditional, for the sake of consistency. */
3596 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3597 (template_sequence[stub_reloc_idx[i]].r_type),
3598 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3599 points_to, info, stub_entry->target_section, "", sym_flags,
3600 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3605 _bfd_final_link_relocate (elf32_arm_howto_from_type
3606 (template_sequence[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3607 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3608 sym_value + stub_entry->target_addend,
3609 template_sequence[stub_reloc_idx[i]].reloc_addend);
3616 /* Calculate the template, template size and instruction size for a stub.
3617 Return value is the instruction size. */
3620 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3621 const insn_sequence **stub_template,
3622 int *stub_template_size)
3624 const insn_sequence *template_sequence = NULL;
3625 int template_size = 0, i;
3628 template_sequence = stub_definitions[stub_type].template_sequence;
3629 template_size = stub_definitions[stub_type].template_size;
3632 for (i = 0; i < template_size; i++)
3634 switch (template_sequence[i].type)
3653 *stub_template = template_sequence;
3655 if (stub_template_size)
3656 *stub_template_size = template_size;
3661 /* As above, but don't actually build the stub. Just bump offset so
3662 we know stub section sizes. */
3665 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3668 struct elf32_arm_stub_hash_entry *stub_entry;
3669 struct elf32_arm_link_hash_table *htab;
3670 const insn_sequence *template_sequence;
3671 int template_size, size;
3673 /* Massage our args to the form they really have. */
3674 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3675 htab = (struct elf32_arm_link_hash_table *) in_arg;
3677 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3678 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3680 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3683 stub_entry->stub_size = size;
3684 stub_entry->stub_template = template_sequence;
3685 stub_entry->stub_template_size = template_size;
3687 size = (size + 7) & ~7;
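/* Round each stub up to a multiple of 8 bytes, presumably so that every
   stub (including any trailing literal data word) starts suitably
   aligned no matter what the previous stub in the section contained. */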
3688 stub_entry->stub_sec->size += size;
3693 /* External entry points for sizing and building linker stubs. */
3695 /* Set up various things so that we can make a list of input sections
3696 for each output section included in the link. Returns -1 on error,
3697 0 when no stubs will be needed, and 1 on success. */
3700 elf32_arm_setup_section_lists (bfd *output_bfd,
3701 struct bfd_link_info *info)
3704 unsigned int bfd_count;
3705 int top_id, top_index;
3707 asection **input_list, **list;
3709 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3711 if (! is_elf_hash_table (htab))
3714 /* Count the number of input BFDs and find the top input section id. */
3715 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3717 input_bfd = input_bfd->link_next)
3720 for (section = input_bfd->sections;
3722 section = section->next)
3724 if (top_id < section->id)
3725 top_id = section->id;
3728 htab->bfd_count = bfd_count;
3730 amt = sizeof (struct map_stub) * (top_id + 1);
3731 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3732 if (htab->stub_group == NULL)
3735 /* We can't use output_bfd->section_count here to find the top output
3736 section index as some sections may have been removed, and
3737 _bfd_strip_section_from_output doesn't renumber the indices. */
3738 for (section = output_bfd->sections, top_index = 0;
3740 section = section->next)
3742 if (top_index < section->index)
3743 top_index = section->index;
3746 htab->top_index = top_index;
3747 amt = sizeof (asection *) * (top_index + 1);
3748 input_list = (asection **) bfd_malloc (amt);
3749 htab->input_list = input_list;
3750 if (input_list == NULL)
3753 /* For sections we aren't interested in, mark their entries with a
3754 value we can check later. */
3755 list = input_list + top_index;
3757 *list = bfd_abs_section_ptr;
3758 while (list-- != input_list);
3760 for (section = output_bfd->sections;
3762 section = section->next)
3764 if ((section->flags & SEC_CODE) != 0)
3765 input_list[section->index] = NULL;
3771 /* The linker repeatedly calls this function for each input section,
3772 in the order that input sections are linked into output sections.
3773 Build lists of input sections to determine groupings between which
3774 we may insert linker stubs. */
3777 elf32_arm_next_input_section (struct bfd_link_info *info,
3780 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3782 if (isec->output_section->index <= htab->top_index)
3784 asection **list = htab->input_list + isec->output_section->index;
3786 if (*list != bfd_abs_section_ptr)
3788 /* Steal the link_sec pointer for our list. */
3789 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3790 /* This happens to make the list in reverse order,
3791 which we reverse later. */
3792 PREV_SEC (isec) = *list;
3798 /* See whether we can group stub sections together. Grouping stub
3799 sections may result in fewer stubs. More importantly, we need to
3800 put all .init* and .fini* stubs at the end of the .init or
3801 .fini output sections respectively, because glibc splits the
3802 _init and _fini functions into multiple parts. Putting a stub in
3803 the middle of a function is not a good idea. */
3806 group_sections (struct elf32_arm_link_hash_table *htab,
3807 bfd_size_type stub_group_size,
3808 bfd_boolean stubs_always_after_branch)
3810 asection **list = htab->input_list;
3814 asection *tail = *list;
3817 if (tail == bfd_abs_section_ptr)
3820 /* Reverse the list: we must avoid placing stubs at the
3821 beginning of the section because the beginning of the text
3822 section may be required for an interrupt vector in bare metal
3823 code. */
3824 #define NEXT_SEC PREV_SEC
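/* NEXT_SEC is deliberately the same field as PREV_SEC: while the
   per-output-section lists are collected (in elf32_arm_next_input_section)
   the link_sec pointer chains each section to the one seen before it, and
   once the list has been reversed here that same pointer is read as the
   forward link. */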
3826 while (tail != NULL)
3828 /* Pop from tail. */
3829 asection *item = tail;
3830 tail = PREV_SEC (item);
3833 NEXT_SEC (item) = head;
3837 while (head != NULL)
3841 bfd_vma stub_group_start = head->output_offset;
3842 bfd_vma end_of_next;
3845 while (NEXT_SEC (curr) != NULL)
3847 next = NEXT_SEC (curr);
3848 end_of_next = next->output_offset + next->size;
3849 if (end_of_next - stub_group_start >= stub_group_size)
3850 /* End of NEXT is too far from start, so stop. */
3852 /* Add NEXT to the group. */
3856 /* OK, the size from the start to the start of CURR is less
3857 than stub_group_size and thus can be handled by one stub
3858 section. (Or the head section is itself larger than
3859 stub_group_size, in which case we may be toast.)
3860 We should really be keeping track of the total size of
3861 stubs added here, as stubs contribute to the final output
3862 section size. */
3865 next = NEXT_SEC (head);
3866 /* Set up this stub group. */
3867 htab->stub_group[head->id].link_sec = curr;
3869 while (head != curr && (head = next) != NULL);
3871 /* But wait, there's more! Input sections up to stub_group_size
3872 bytes after the stub section can be handled by it too. */
3873 if (!stubs_always_after_branch)
3875 stub_group_start = curr->output_offset + curr->size;
3877 while (next != NULL)
3879 end_of_next = next->output_offset + next->size;
3880 if (end_of_next - stub_group_start >= stub_group_size)
3881 /* End of NEXT is too far from stubs, so stop. */
3883 /* Add NEXT to the stub group. */
3885 next = NEXT_SEC (head);
3886 htab->stub_group[head->id].link_sec = curr;
3892 while (list++ != htab->input_list + htab->top_index);
3894 free (htab->input_list);
3899 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3900 erratum. */
3903 a8_reloc_compare (const void *a, const void *b)
3905 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3906 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3908 if (ra->from < rb->from)
3910 else if (ra->from > rb->from)
3916 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3917 const char *, char **);
3919 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3920 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3921 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3922 otherwise. */
3925 cortex_a8_erratum_scan (bfd *input_bfd,
3926 struct bfd_link_info *info,
3927 struct a8_erratum_fix **a8_fixes_p,
3928 unsigned int *num_a8_fixes_p,
3929 unsigned int *a8_fix_table_size_p,
3930 struct a8_erratum_reloc *a8_relocs,
3931 unsigned int num_a8_relocs,
3932 unsigned prev_num_a8_fixes,
3933 bfd_boolean *stub_changed_p)
3936 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3937 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3938 unsigned int num_a8_fixes = *num_a8_fixes_p;
3939 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3941 for (section = input_bfd->sections;
3943 section = section->next)
3945 bfd_byte *contents = NULL;
3946 struct _arm_elf_section_data *sec_data;
3950 if (elf_section_type (section) != SHT_PROGBITS
3951 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3952 || (section->flags & SEC_EXCLUDE) != 0
3953 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3954 || (section->output_section == bfd_abs_section_ptr))
3957 base_vma = section->output_section->vma + section->output_offset;
3959 if (elf_section_data (section)->this_hdr.contents != NULL)
3960 contents = elf_section_data (section)->this_hdr.contents;
3961 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3964 sec_data = elf32_arm_section_data (section);
3966 for (span = 0; span < sec_data->mapcount; span++)
3968 unsigned int span_start = sec_data->map[span].vma;
3969 unsigned int span_end = (span == sec_data->mapcount - 1)
3970 ? section->size : sec_data->map[span + 1].vma;
3972 char span_type = sec_data->map[span].type;
3973 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3975 if (span_type != 't')
3978 /* Span is entirely within a single 4KB region: skip scanning. */
3979 if (((base_vma + span_start) & ~0xfff)
3980 == ((base_vma + span_end) & ~0xfff))
3983 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
3985 * The opcode is BLX.W, BL.W, B.W, Bcc.W
3986 * The branch target is in the same 4KB region as the
3987 first half of the branch.
3988 * The instruction before the branch is a 32-bit
3989 length non-branch instruction. */
3990 for (i = span_start; i < span_end;)
3992 unsigned int insn = bfd_getl16 (&contents[i]);
3993 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
3994 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
3996 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4001 /* Load the rest of the insn (in manual-friendly order). */
4002 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4004 /* Encoding T4: B<c>.W. */
4005 is_b = (insn & 0xf800d000) == 0xf0009000;
4006 /* Encoding T1: BL<c>.W. */
4007 is_bl = (insn & 0xf800d000) == 0xf000d000;
4008 /* Encoding T2: BLX<c>.W. */
4009 is_blx = (insn & 0xf800d000) == 0xf000c000;
4010 /* Encoding T3: B<c>.W (not permitted in IT block). */
4011 is_bcc = (insn & 0xf800d000) == 0xf0008000
4012 && (insn & 0x07f00000) != 0x03800000;
4015 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4017 if (((base_vma + i) & 0xfff) == 0xffe
4018 && insn_32bit
4019 && is_32bit_branch
4020 && last_was_32bit
4021 && ! last_was_branch)
4023 bfd_signed_vma offset;
4024 bfd_boolean force_target_arm = FALSE;
4025 bfd_boolean force_target_thumb = FALSE;
4027 enum elf32_arm_stub_type stub_type = arm_stub_none;
4028 struct a8_erratum_reloc key, *found;
4030 key.from = base_vma + i;
4031 found = (struct a8_erratum_reloc *)
4032 bsearch (&key, a8_relocs, num_a8_relocs,
4033 sizeof (struct a8_erratum_reloc),
4038 char *error_message = NULL;
4039 struct elf_link_hash_entry *entry;
4041 /* We don't care about the error returned from this
4042 function, only if there is glue or not. */
4043 entry = find_thumb_glue (info, found->sym_name,
4047 found->non_a8_stub = TRUE;
4049 if (found->r_type == R_ARM_THM_CALL
4050 && found->st_type != STT_ARM_TFUNC)
4051 force_target_arm = TRUE;
4052 else if (found->r_type == R_ARM_THM_CALL
4053 && found->st_type == STT_ARM_TFUNC)
4054 force_target_thumb = TRUE;
4057 /* Check if we have an offending branch instruction. */
4059 if (found && found->non_a8_stub)
4060 /* We've already made a stub for this instruction, e.g.
4061 it's a long branch or a Thumb->ARM stub. Assume that
4062 stub will suffice to work around the A8 erratum (see
4063 setting of always_after_branch above). */
4064 ;
4065 else if (is_bcc)
4066 {
4067 offset = (insn & 0x7ff) << 1;
4068 offset |= (insn & 0x3f0000) >> 4;
4069 offset |= (insn & 0x2000) ? 0x40000 : 0;
4070 offset |= (insn & 0x800) ? 0x80000 : 0;
4071 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4072 if (offset & 0x100000)
4073 offset |= ~ ((bfd_signed_vma) 0xfffff);
4074 stub_type = arm_stub_a8_veneer_b_cond;
4076 else if (is_b || is_bl || is_blx)
4078 int s = (insn & 0x4000000) != 0;
4079 int j1 = (insn & 0x2000) != 0;
4080 int j2 = (insn & 0x800) != 0;
4081 int i1 = !(j1 ^ s);
4082 int i2 = !(j2 ^ s);
4084 offset = (insn & 0x7ff) << 1;
4085 offset |= (insn & 0x3ff0000) >> 4;
4086 offset |= (i2 << 22);
4087 offset |= (i1 << 23);
4088 offset |= (s << 24);
4089 if (offset & 0x1000000)
4090 offset |= ~ ((bfd_signed_vma) 0xffffff);
4093 offset &= ~ ((bfd_signed_vma) 3);
4095 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4096 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4099 if (stub_type != arm_stub_none)
4101 bfd_vma pc_for_insn = base_vma + i + 4;
4103 /* The original instruction is a BL, but the target is
4104 an ARM instruction. If we were not making a stub,
4105 the BL would have been converted to a BLX. Use the
4106 BLX stub instead in that case. */
4107 if (htab->use_blx && force_target_arm
4108 && stub_type == arm_stub_a8_veneer_bl)
4110 stub_type = arm_stub_a8_veneer_blx;
4114 /* Conversely, if the original instruction was
4115 BLX but the target is Thumb mode, use the BL
4116 stub. */
4117 else if (force_target_thumb
4118 && stub_type == arm_stub_a8_veneer_blx)
4120 stub_type = arm_stub_a8_veneer_bl;
4126 pc_for_insn &= ~ ((bfd_vma) 3);
4128 /* If we found a relocation, use the proper destination,
4129 not the offset in the (unrelocated) instruction.
4130 Note this is always done if we switched the stub type
4131 above. */
4132 if (found)
4133 offset =
4134 (bfd_signed_vma) (found->destination - pc_for_insn);
4136 target = pc_for_insn + offset;
4138 /* The BLX stub is ARM-mode code. Adjust the offset to
4139 take the different PC value (+8 instead of +4) into
4140 account. */
4141 if (stub_type == arm_stub_a8_veneer_blx)
4142 offset += 4;
4144 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4146 char *stub_name = NULL;
4148 if (num_a8_fixes == a8_fix_table_size)
4150 a8_fix_table_size *= 2;
4151 a8_fixes = (struct a8_erratum_fix *)
4152 bfd_realloc (a8_fixes,
4153 sizeof (struct a8_erratum_fix)
4154 * a8_fix_table_size);
4157 if (num_a8_fixes < prev_num_a8_fixes)
4159 /* If we're doing a subsequent scan,
4160 check if we've found the same fix as
4161 before, and try and reuse the stub
4162 name. */
4163 stub_name = a8_fixes[num_a8_fixes].stub_name;
4164 if ((a8_fixes[num_a8_fixes].section != section)
4165 || (a8_fixes[num_a8_fixes].offset != i))
4169 *stub_changed_p = TRUE;
4175 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4176 if (stub_name != NULL)
4177 sprintf (stub_name, "%x:%x", section->id, i);
4180 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4181 a8_fixes[num_a8_fixes].section = section;
4182 a8_fixes[num_a8_fixes].offset = i;
4183 a8_fixes[num_a8_fixes].addend = offset;
4184 a8_fixes[num_a8_fixes].orig_insn = insn;
4185 a8_fixes[num_a8_fixes].stub_name = stub_name;
4186 a8_fixes[num_a8_fixes].stub_type = stub_type;
4193 i += insn_32bit ? 4 : 2;
4194 last_was_32bit = insn_32bit;
4195 last_was_branch = is_32bit_branch;
4199 if (elf_section_data (section)->this_hdr.contents == NULL)
4203 *a8_fixes_p = a8_fixes;
4204 *num_a8_fixes_p = num_a8_fixes;
4205 *a8_fix_table_size_p = a8_fix_table_size;
4210 /* Determine and set the size of the stub section for a final link.
4212 The basic idea here is to examine all the relocations looking for
4213 PC-relative calls to a target that is unreachable with a "bl" instruction. */
4217 elf32_arm_size_stubs (bfd *output_bfd,
4219 struct bfd_link_info *info,
4220 bfd_signed_vma group_size,
4221 asection * (*add_stub_section) (const char *, asection *),
4222 void (*layout_sections_again) (void))
4224 bfd_size_type stub_group_size;
4225 bfd_boolean stubs_always_after_branch;
4226 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4227 struct a8_erratum_fix *a8_fixes = NULL;
4228 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4229 struct a8_erratum_reloc *a8_relocs = NULL;
4230 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4232 if (htab->fix_cortex_a8)
4234 a8_fixes = (struct a8_erratum_fix *)
4235 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4236 a8_relocs = (struct a8_erratum_reloc *)
4237 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4240 /* Propagate mach to stub bfd, because it may not have been
4241 finalized when we created stub_bfd. */
4242 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4243 bfd_get_mach (output_bfd));
4245 /* Stash our params away. */
4246 htab->stub_bfd = stub_bfd;
4247 htab->add_stub_section = add_stub_section;
4248 htab->layout_sections_again = layout_sections_again;
4249 stubs_always_after_branch = group_size < 0;
4251 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4252 as the first half of a 32-bit branch straddling two 4K pages. This is a
4253 crude way of enforcing that. */
4254 if (htab->fix_cortex_a8)
4255 stubs_always_after_branch = 1;
4258 stub_group_size = -group_size;
4260 stub_group_size = group_size;
4262 if (stub_group_size == 1)
4264 /* Default values. */
4265 /* The Thumb branch range of +-4MB has to be used as the default
4266 maximum size (a given section can contain both ARM and Thumb
4267 code, so the worst case has to be taken into account).
4269 This value is 24K less than that, which allows for 2025
4270 12-byte stubs. If we exceed that, then we will fail to link.
4271 The user will have to relink with an explicit group size option. */
4273 stub_group_size = 4170000;
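/* Annotation added for clarity, spelling out the arithmetic behind the
   constant above: the +-4MB Thumb range is 4194304 bytes, and
   4194304 - 4170000 = 24304 (roughly 24K), which holds
   24304 / 12 = 2025 twelve-byte stubs.  */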
4276 group_sections (htab, stub_group_size, stubs_always_after_branch);
4278 /* If we're applying the cortex A8 fix, we need to determine the
4279 program header size now, because we cannot change it later --
4280 that could alter section placements. Notice the A8 erratum fix
4281 ends up requiring the section addresses to remain unchanged
4282 modulo the page size. That's something we cannot represent
4283 inside BFD, and we don't want to force the section alignment to
4284 be the page size. */
4285 if (htab->fix_cortex_a8)
4286 (*htab->layout_sections_again) ();
4291 unsigned int bfd_indx;
4293 bfd_boolean stub_changed = FALSE;
4294 unsigned prev_num_a8_fixes = num_a8_fixes;
4297 for (input_bfd = info->input_bfds, bfd_indx = 0;
4299 input_bfd = input_bfd->link_next, bfd_indx++)
4301 Elf_Internal_Shdr *symtab_hdr;
4303 Elf_Internal_Sym *local_syms = NULL;
4307 /* We'll need the symbol table in a second. */
4308 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4309 if (symtab_hdr->sh_info == 0)
4312 /* Walk over each section attached to the input bfd. */
4313 for (section = input_bfd->sections;
4315 section = section->next)
4317 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4319 /* If there aren't any relocs, then there's nothing more to do. */
4321 if ((section->flags & SEC_RELOC) == 0
4322 || section->reloc_count == 0
4323 || (section->flags & SEC_CODE) == 0)
4326 /* If this section is a link-once section that will be
4327 discarded, then don't create any stubs. */
4328 if (section->output_section == NULL
4329 || section->output_section->owner != output_bfd)
4332 /* Get the relocs. */
4334 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4335 NULL, info->keep_memory);
4336 if (internal_relocs == NULL)
4337 goto error_ret_free_local;
4339 /* Now examine each relocation. */
4340 irela = internal_relocs;
4341 irelaend = irela + section->reloc_count;
4342 for (; irela < irelaend; irela++)
4344 unsigned int r_type, r_indx;
4345 enum elf32_arm_stub_type stub_type;
4346 struct elf32_arm_stub_hash_entry *stub_entry;
4349 bfd_vma destination;
4350 struct elf32_arm_link_hash_entry *hash;
4351 const char *sym_name;
4353 const asection *id_sec;
4354 unsigned char st_type;
4355 bfd_boolean created_stub = FALSE;
4357 r_type = ELF32_R_TYPE (irela->r_info);
4358 r_indx = ELF32_R_SYM (irela->r_info);
4360 if (r_type >= (unsigned int) R_ARM_max)
4362 bfd_set_error (bfd_error_bad_value);
4363 error_ret_free_internal:
4364 if (elf_section_data (section)->relocs == NULL)
4365 free (internal_relocs);
4366 goto error_ret_free_local;
4369 /* Only look for stubs on branch instructions. */
4370 if ((r_type != (unsigned int) R_ARM_CALL)
4371 && (r_type != (unsigned int) R_ARM_THM_CALL)
4372 && (r_type != (unsigned int) R_ARM_JUMP24)
4373 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4374 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4375 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4376 && (r_type != (unsigned int) R_ARM_PLT32))
4379 /* Now determine the call target, its name, value, section. */
4386 if (r_indx < symtab_hdr->sh_info)
4388 /* It's a local symbol. */
4389 Elf_Internal_Sym *sym;
4390 Elf_Internal_Shdr *hdr;
4392 if (local_syms == NULL)
4395 = (Elf_Internal_Sym *) symtab_hdr->contents;
4396 if (local_syms == NULL)
4398 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4399 symtab_hdr->sh_info, 0,
4401 if (local_syms == NULL)
4402 goto error_ret_free_internal;
4405 sym = local_syms + r_indx;
4406 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4407 sym_sec = hdr->bfd_section;
4409 /* This is an undefined symbol. It can never be resolved. */
4413 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4414 sym_value = sym->st_value;
4415 destination = (sym_value + irela->r_addend
4416 + sym_sec->output_offset
4417 + sym_sec->output_section->vma);
4418 st_type = ELF_ST_TYPE (sym->st_info);
4420 = bfd_elf_string_from_elf_section (input_bfd,
4421 symtab_hdr->sh_link,
4426 /* It's an external symbol. */
4429 e_indx = r_indx - symtab_hdr->sh_info;
4430 hash = ((struct elf32_arm_link_hash_entry *)
4431 elf_sym_hashes (input_bfd)[e_indx]);
4433 while (hash->root.root.type == bfd_link_hash_indirect
4434 || hash->root.root.type == bfd_link_hash_warning)
4435 hash = ((struct elf32_arm_link_hash_entry *)
4436 hash->root.root.u.i.link);
4438 if (hash->root.root.type == bfd_link_hash_defined
4439 || hash->root.root.type == bfd_link_hash_defweak)
4441 sym_sec = hash->root.root.u.def.section;
4442 sym_value = hash->root.root.u.def.value;
4444 struct elf32_arm_link_hash_table *globals =
4445 elf32_arm_hash_table (info);
4447 /* For a destination in a shared library,
4448 use the PLT stub as target address to
4449 decide whether a branch stub is needed. */
4451 if (globals->splt != NULL && hash != NULL
4452 && hash->root.plt.offset != (bfd_vma) -1)
4454 sym_sec = globals->splt;
4455 sym_value = hash->root.plt.offset;
4456 if (sym_sec->output_section != NULL)
4457 destination = (sym_value
4458 + sym_sec->output_offset
4459 + sym_sec->output_section->vma);
4461 else if (sym_sec->output_section != NULL)
4462 destination = (sym_value + irela->r_addend
4463 + sym_sec->output_offset
4464 + sym_sec->output_section->vma);
4466 else if ((hash->root.root.type == bfd_link_hash_undefined)
4467 || (hash->root.root.type == bfd_link_hash_undefweak))
4469 /* For a shared library, use the PLT stub as
4470 target address to decide whether a long
4471 branch stub is needed.
4472 For absolute code, they cannot be handled. */
4473 struct elf32_arm_link_hash_table *globals =
4474 elf32_arm_hash_table (info);
4476 if (globals->splt != NULL && hash != NULL
4477 && hash->root.plt.offset != (bfd_vma) -1)
4479 sym_sec = globals->splt;
4480 sym_value = hash->root.plt.offset;
4481 if (sym_sec->output_section != NULL)
4482 destination = (sym_value
4483 + sym_sec->output_offset
4484 + sym_sec->output_section->vma);
4491 bfd_set_error (bfd_error_bad_value);
4492 goto error_ret_free_internal;
4494 st_type = ELF_ST_TYPE (hash->root.type);
4495 sym_name = hash->root.root.root.string;
4500 /* Determine what (if any) linker stub is needed. */
4501 stub_type = arm_type_of_stub (info, section, irela,
4503 destination, sym_sec,
4504 input_bfd, sym_name);
4505 if (stub_type == arm_stub_none)
4508 /* Support for grouping stub sections. */
4509 id_sec = htab->stub_group[section->id].link_sec;
4511 /* Get the name of this stub. */
4512 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4515 goto error_ret_free_internal;
4517 /* We've either created a stub for this reloc already,
4518 or we are about to. */
4519 created_stub = TRUE;
4521 stub_entry = arm_stub_hash_lookup
4522 (&htab->stub_hash_table, stub_name,
4524 if (stub_entry != NULL)
4526 /* The proper stub has already been created. */
4528 stub_entry->target_value = sym_value;
4532 stub_entry = elf32_arm_add_stub (stub_name, section,
4534 if (stub_entry == NULL)
4537 goto error_ret_free_internal;
4540 stub_entry->target_value = sym_value;
4541 stub_entry->target_section = sym_sec;
4542 stub_entry->stub_type = stub_type;
4543 stub_entry->h = hash;
4544 stub_entry->st_type = st_type;
4546 if (sym_name == NULL)
4547 sym_name = "unnamed";
4548 stub_entry->output_name = (char *)
4549 bfd_alloc (htab->stub_bfd,
4550 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4551 + strlen (sym_name));
4552 if (stub_entry->output_name == NULL)
4555 goto error_ret_free_internal;
4558 /* For historical reasons, use the existing names for
4559 ARM-to-Thumb and Thumb-to-ARM stubs. */
4560 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4561 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4562 && st_type != STT_ARM_TFUNC)
4563 sprintf (stub_entry->output_name,
4564 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4565 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4566 || (r_type == (unsigned int) R_ARM_JUMP24))
4567 && st_type == STT_ARM_TFUNC)
4568 sprintf (stub_entry->output_name,
4569 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4571 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4574 stub_changed = TRUE;
4578 /* Look for relocations which might trigger Cortex-A8 erratum fixes. */
4580 if (htab->fix_cortex_a8
4581 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4582 || r_type == (unsigned int) R_ARM_THM_JUMP19
4583 || r_type == (unsigned int) R_ARM_THM_CALL
4584 || r_type == (unsigned int) R_ARM_THM_XPC22))
4586 bfd_vma from = section->output_section->vma
4587 + section->output_offset
4590 if ((from & 0xfff) == 0xffe)
4592 /* Found a candidate. Note we haven't checked the
4593 destination is within 4K here: if we do so (and
4594 don't create an entry in a8_relocs) we can't tell
4595 that a branch should have been relocated when scanning later. */
4597 if (num_a8_relocs == a8_reloc_table_size)
4599 a8_reloc_table_size *= 2;
4600 a8_relocs = (struct a8_erratum_reloc *)
4601 bfd_realloc (a8_relocs,
4602 sizeof (struct a8_erratum_reloc)
4603 * a8_reloc_table_size);
4606 a8_relocs[num_a8_relocs].from = from;
4607 a8_relocs[num_a8_relocs].destination = destination;
4608 a8_relocs[num_a8_relocs].r_type = r_type;
4609 a8_relocs[num_a8_relocs].st_type = st_type;
4610 a8_relocs[num_a8_relocs].sym_name = sym_name;
4611 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4618 /* We're done with the internal relocs, free them. */
4619 if (elf_section_data (section)->relocs == NULL)
4620 free (internal_relocs);
4623 if (htab->fix_cortex_a8)
4625 /* Sort relocs which might apply to Cortex-A8 erratum. */
4626 qsort (a8_relocs, num_a8_relocs,
4627 sizeof (struct a8_erratum_reloc),
4630 /* Scan for branches which might trigger Cortex-A8 erratum. */
4631 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4632 &num_a8_fixes, &a8_fix_table_size,
4633 a8_relocs, num_a8_relocs,
4634 prev_num_a8_fixes, &stub_changed)
4636 goto error_ret_free_local;
4640 if (prev_num_a8_fixes != num_a8_fixes)
4641 stub_changed = TRUE;
4646 /* OK, we've added some stubs. Find out the new size of the stub sections. */
4648 for (stub_sec = htab->stub_bfd->sections;
4650 stub_sec = stub_sec->next)
4652 /* Ignore non-stub sections. */
4653 if (!strstr (stub_sec->name, STUB_SUFFIX))
4659 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4661 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4662 if (htab->fix_cortex_a8)
4663 for (i = 0; i < num_a8_fixes; i++)
4665 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4666 a8_fixes[i].section, htab);
4668 if (stub_sec == NULL)
4669 goto error_ret_free_local;
4672 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4677 /* Ask the linker to do its stuff. */
4678 (*htab->layout_sections_again) ();
4681 /* Add stubs for Cortex-A8 erratum fixes now. */
4682 if (htab->fix_cortex_a8)
4684 for (i = 0; i < num_a8_fixes; i++)
4686 struct elf32_arm_stub_hash_entry *stub_entry;
4687 char *stub_name = a8_fixes[i].stub_name;
4688 asection *section = a8_fixes[i].section;
4689 unsigned int section_id = a8_fixes[i].section->id;
4690 asection *link_sec = htab->stub_group[section_id].link_sec;
4691 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4692 const insn_sequence *template_sequence;
4693 int template_size, size = 0;
4695 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4697 if (stub_entry == NULL)
4699 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4705 stub_entry->stub_sec = stub_sec;
4706 stub_entry->stub_offset = 0;
4707 stub_entry->id_sec = link_sec;
4708 stub_entry->stub_type = a8_fixes[i].stub_type;
4709 stub_entry->target_section = a8_fixes[i].section;
4710 stub_entry->target_value = a8_fixes[i].offset;
4711 stub_entry->target_addend = a8_fixes[i].addend;
4712 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4713 stub_entry->st_type = STT_ARM_TFUNC;
4715 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4719 stub_entry->stub_size = size;
4720 stub_entry->stub_template = template_sequence;
4721 stub_entry->stub_template_size = template_size;
4724 /* Stash the Cortex-A8 erratum fix array for use later in
4725 elf32_arm_write_section(). */
4726 htab->a8_erratum_fixes = a8_fixes;
4727 htab->num_a8_erratum_fixes = num_a8_fixes;
4731 htab->a8_erratum_fixes = NULL;
4732 htab->num_a8_erratum_fixes = 0;
4736 error_ret_free_local:
4740 /* Build all the stubs associated with the current output file. The
4741 stubs are kept in a hash table attached to the main linker hash
4742 table. We also set up the .plt entries for statically linked PIC
4743 functions here. This function is called via arm_elf_finish in the linker. */
4747 elf32_arm_build_stubs (struct bfd_link_info *info)
4750 struct bfd_hash_table *table;
4751 struct elf32_arm_link_hash_table *htab;
4753 htab = elf32_arm_hash_table (info);
4755 for (stub_sec = htab->stub_bfd->sections;
4757 stub_sec = stub_sec->next)
4761 /* Ignore non-stub sections. */
4762 if (!strstr (stub_sec->name, STUB_SUFFIX))
4765 /* Allocate memory to hold the linker stubs. */
4766 size = stub_sec->size;
4767 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4768 if (stub_sec->contents == NULL && size != 0)
4773 /* Build the stubs as directed by the stub hash table. */
4774 table = &htab->stub_hash_table;
4775 bfd_hash_traverse (table, arm_build_one_stub, info);
4776 if (htab->fix_cortex_a8)
4778 /* Place the cortex a8 stubs last. */
4779 htab->fix_cortex_a8 = -1;
4780 bfd_hash_traverse (table, arm_build_one_stub, info);
4786 /* Locate the Thumb encoded calling stub for NAME. */
4788 static struct elf_link_hash_entry *
4789 find_thumb_glue (struct bfd_link_info *link_info,
4791 char **error_message)
4794 struct elf_link_hash_entry *hash;
4795 struct elf32_arm_link_hash_table *hash_table;
4797 /* We need a pointer to the armelf specific hash table. */
4798 hash_table = elf32_arm_hash_table (link_info);
4800 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4801 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4803 BFD_ASSERT (tmp_name);
4805 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4807 hash = elf_link_hash_lookup
4808 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4811 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4812 tmp_name, name) == -1)
4813 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4820 /* Locate the ARM encoded calling stub for NAME. */
4822 static struct elf_link_hash_entry *
4823 find_arm_glue (struct bfd_link_info *link_info,
4825 char **error_message)
4828 struct elf_link_hash_entry *myh;
4829 struct elf32_arm_link_hash_table *hash_table;
4831 /* We need a pointer to the elfarm specific hash table. */
4832 hash_table = elf32_arm_hash_table (link_info);
4834 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4835 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4837 BFD_ASSERT (tmp_name);
4839 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4841 myh = elf_link_hash_lookup
4842 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4845 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4846 tmp_name, name) == -1)
4847 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4854 /* ARM->Thumb glue (static images):
4858 ldr r12, __func_addr
4861 .word func @ behave as if you saw an ARM_32 reloc.
4868 .word func @ behave as if you saw an ARM_32 reloc.
4870 (relocatable images)
4873 ldr r12, __func_offset
4879 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4880 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4881 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4882 static const insn32 a2t3_func_addr_insn = 0x00000001;
4884 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4885 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4886 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4888 #define ARM2THUMB_PIC_GLUE_SIZE 16
4889 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4890 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4891 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4893 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4897 __func_from_thumb: __func_from_thumb:
4899 nop ldr r6, __func_addr
4909 #define THUMB2ARM_GLUE_SIZE 8
4910 static const insn16 t2a1_bx_pc_insn = 0x4778;
4911 static const insn16 t2a2_noop_insn = 0x46c0;
4912 static const insn32 t2a3_b_insn = 0xea000000;
4914 #define VFP11_ERRATUM_VENEER_SIZE 8
4916 #define ARM_BX_VENEER_SIZE 12
4917 static const insn32 armbx1_tst_insn = 0xe3100001;
4918 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4919 static const insn32 armbx3_bx_insn = 0xe12fff10;
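/* Annotation added for clarity (not part of the original source): the
   opcode constants above are believed to correspond to the following
   instructions:
     a2t1_ldr_insn      0xe59fc000  ldr   ip, [pc, #0]
     a2t2_bx_r12_insn   0xe12fff1c  bx    ip
     a2t1v5_ldr_insn    0xe51ff004  ldr   pc, [pc, #-4]
     a2t1p_ldr_insn     0xe59fc004  ldr   ip, [pc, #4]
     a2t2p_add_pc_insn  0xe08cc00f  add   ip, ip, pc
     a2t3p_bx_r12_insn  0xe12fff1c  bx    ip
     t2a1_bx_pc_insn    0x4778      bx    pc           (Thumb)
     t2a2_noop_insn     0x46c0      mov   r8, r8       (Thumb nop)
     t2a3_b_insn        0xea000000  b     <offset 0>
     armbx1_tst_insn    0xe3100001  tst   rN, #1       (N ORed in at use)
     armbx2_moveq_insn  0x01a0f000  moveq pc, rN
     armbx3_bx_insn     0xe12fff10  bx    rN
   The *_func_addr_insn values (0x00000001) are not instructions but the
   Thumb bit that gets ORed into the address word of the veneer.  */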
4921 #ifndef ELFARM_NABI_C_INCLUDED
4923 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4926 bfd_byte * contents;
4930 /* Do not include empty glue sections in the output. */
4933 s = bfd_get_section_by_name (abfd, name);
4935 s->flags |= SEC_EXCLUDE;
4940 BFD_ASSERT (abfd != NULL);
4942 s = bfd_get_section_by_name (abfd, name);
4943 BFD_ASSERT (s != NULL);
4945 contents = (bfd_byte *) bfd_alloc (abfd, size);
4947 BFD_ASSERT (s->size == size);
4948 s->contents = contents;
4952 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4954 struct elf32_arm_link_hash_table * globals;
4956 globals = elf32_arm_hash_table (info);
4957 BFD_ASSERT (globals != NULL);
4959 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4960 globals->arm_glue_size,
4961 ARM2THUMB_GLUE_SECTION_NAME);
4963 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4964 globals->thumb_glue_size,
4965 THUMB2ARM_GLUE_SECTION_NAME);
4967 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4968 globals->vfp11_erratum_glue_size,
4969 VFP11_ERRATUM_VENEER_SECTION_NAME);
4971 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4972 globals->bx_glue_size,
4973 ARM_BX_GLUE_SECTION_NAME);
4978 /* Allocate space and symbols for calling a Thumb function from Arm mode.
4979 Returns the symbol identifying the stub. */
4981 static struct elf_link_hash_entry *
4982 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
4983 struct elf_link_hash_entry * h)
4985 const char * name = h->root.root.string;
4988 struct elf_link_hash_entry * myh;
4989 struct bfd_link_hash_entry * bh;
4990 struct elf32_arm_link_hash_table * globals;
4994 globals = elf32_arm_hash_table (link_info);
4996 BFD_ASSERT (globals != NULL);
4997 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4999 s = bfd_get_section_by_name
5000 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5002 BFD_ASSERT (s != NULL);
5004 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5005 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5007 BFD_ASSERT (tmp_name);
5009 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5011 myh = elf_link_hash_lookup
5012 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5016 /* We've already seen this guy. */
5021 /* The only trick here is using hash_table->arm_glue_size as the value.
5022 Even though the section isn't allocated yet, this is where we will be
5023 putting it. The +1 on the value marks that the stub has not been
5024 output yet - not that it is a Thumb function. */
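/* Annotation added for clarity: the low bit set here is what the
   "(my_offset & 0x01) == 0x01" tests in elf32_arm_create_thumb_stub and
   elf32_thumb_to_arm_stub (further below) look for, i.e. whether the
   glue code for this symbol still has to be emitted.  */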
5026 val = globals->arm_glue_size + 1;
5027 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5028 tmp_name, BSF_GLOBAL, s, val,
5029 NULL, TRUE, FALSE, &bh);
5031 myh = (struct elf_link_hash_entry *) bh;
5032 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5033 myh->forced_local = 1;
5037 if (link_info->shared || globals->root.is_relocatable_executable
5038 || globals->pic_veneer)
5039 size = ARM2THUMB_PIC_GLUE_SIZE;
5040 else if (globals->use_blx)
5041 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5043 size = ARM2THUMB_STATIC_GLUE_SIZE;
5046 globals->arm_glue_size += size;
5051 /* Allocate space for ARMv4 BX veneers. */
5054 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5057 struct elf32_arm_link_hash_table *globals;
5059 struct elf_link_hash_entry *myh;
5060 struct bfd_link_hash_entry *bh;
5063 /* BX PC does not need a veneer. */
5067 globals = elf32_arm_hash_table (link_info);
5069 BFD_ASSERT (globals != NULL);
5070 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5072 /* Check if this veneer has already been allocated. */
5073 if (globals->bx_glue_offset[reg])
5076 s = bfd_get_section_by_name
5077 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5079 BFD_ASSERT (s != NULL);
5081 /* Add symbol for veneer. */
5083 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5085 BFD_ASSERT (tmp_name);
5087 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5089 myh = elf_link_hash_lookup
5090 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5092 BFD_ASSERT (myh == NULL);
5095 val = globals->bx_glue_size;
5096 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5097 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5098 NULL, TRUE, FALSE, &bh);
5100 myh = (struct elf_link_hash_entry *) bh;
5101 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5102 myh->forced_local = 1;
5104 s->size += ARM_BX_VENEER_SIZE;
5105 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5106 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5110 /* Add an entry to the code/data map for section SEC. */
5113 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5115 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5116 unsigned int newidx;
5118 if (sec_data->map == NULL)
5120 sec_data->map = (elf32_arm_section_map *)
5121 bfd_malloc (sizeof (elf32_arm_section_map));
5122 sec_data->mapcount = 0;
5123 sec_data->mapsize = 1;
5126 newidx = sec_data->mapcount++;
5128 if (sec_data->mapcount > sec_data->mapsize)
5130 sec_data->mapsize *= 2;
5131 sec_data->map = (elf32_arm_section_map *)
5132 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5133 * sizeof (elf32_arm_section_map));
5138 sec_data->map[newidx].vma = vma;
5139 sec_data->map[newidx].type = type;
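/* Annotation added for clarity: TYPE follows the ARM ELF mapping-symbol
   convention, so it is normally 'a' ($a, ARM code), 't' ($t, Thumb
   code) or 'd' ($d, data); see the name[1] argument passed from
   bfd_elf32_arm_init_maps below.  */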
5144 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5145 veneers are handled for now. */
5148 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5149 elf32_vfp11_erratum_list *branch,
5151 asection *branch_sec,
5152 unsigned int offset)
5155 struct elf32_arm_link_hash_table *hash_table;
5157 struct elf_link_hash_entry *myh;
5158 struct bfd_link_hash_entry *bh;
5160 struct _arm_elf_section_data *sec_data;
5162 elf32_vfp11_erratum_list *newerr;
5164 hash_table = elf32_arm_hash_table (link_info);
5166 BFD_ASSERT (hash_table != NULL);
5167 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5169 s = bfd_get_section_by_name
5170 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5172 sec_data = elf32_arm_section_data (s);
5174 BFD_ASSERT (s != NULL);
5176 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5177 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5179 BFD_ASSERT (tmp_name);
5181 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5182 hash_table->num_vfp11_fixes);
5184 myh = elf_link_hash_lookup
5185 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5187 BFD_ASSERT (myh == NULL);
5190 val = hash_table->vfp11_erratum_glue_size;
5191 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5192 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5193 NULL, TRUE, FALSE, &bh);
5195 myh = (struct elf_link_hash_entry *) bh;
5196 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5197 myh->forced_local = 1;
5199 /* Link veneer back to calling location. */
5200 errcount = ++(sec_data->erratumcount);
5201 newerr = (elf32_vfp11_erratum_list *)
5202 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5204 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5206 newerr->u.v.branch = branch;
5207 newerr->u.v.id = hash_table->num_vfp11_fixes;
5208 branch->u.b.veneer = newerr;
5210 newerr->next = sec_data->erratumlist;
5211 sec_data->erratumlist = newerr;
5213 /* A symbol for the return from the veneer. */
5214 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5215 hash_table->num_vfp11_fixes);
5217 myh = elf_link_hash_lookup
5218 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5225 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5226 branch_sec, val, NULL, TRUE, FALSE, &bh);
5228 myh = (struct elf_link_hash_entry *) bh;
5229 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5230 myh->forced_local = 1;
5234 /* Generate a mapping symbol for the veneer section, and explicitly add an
5235 entry for that symbol to the code/data map for the section. */
5236 if (hash_table->vfp11_erratum_glue_size == 0)
5239 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5240 ever requires this erratum fix. */
5241 _bfd_generic_link_add_one_symbol (link_info,
5242 hash_table->bfd_of_glue_owner, "$a",
5243 BSF_LOCAL, s, 0, NULL,
5246 myh = (struct elf_link_hash_entry *) bh;
5247 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5248 myh->forced_local = 1;
5250 /* The elf32_arm_init_maps function only cares about symbols from input
5251 BFDs. We must make a note of this generated mapping symbol
5252 ourselves so that code byteswapping works properly in
5253 elf32_arm_write_section. */
5254 elf32_arm_section_map_add (s, 'a', 0);
5257 s->size += VFP11_ERRATUM_VENEER_SIZE;
5258 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5259 hash_table->num_vfp11_fixes++;
5261 /* The offset of the veneer. */
5265 #define ARM_GLUE_SECTION_FLAGS \
5266 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5267 | SEC_READONLY | SEC_LINKER_CREATED)
5269 /* Create a fake section for use by the ARM backend of the linker. */
5272 arm_make_glue_section (bfd * abfd, const char * name)
5276 sec = bfd_get_section_by_name (abfd, name);
5281 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5284 || !bfd_set_section_alignment (abfd, sec, 2))
5287 /* Set the gc mark to prevent the section from being removed by garbage
5288 collection, despite the fact that no relocs refer to this section. */
5294 /* Add the glue sections to ABFD. This function is called from the
5295 linker scripts in ld/emultempl/{armelf}.em. */
5298 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5299 struct bfd_link_info *info)
5301 /* If we are only performing a partial
5302 link do not bother adding the glue. */
5303 if (info->relocatable)
5306 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5307 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5308 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5309 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5312 /* Select a BFD to be used to hold the sections used by the glue code.
5313 This function is called from the linker scripts in ld/emultempl/{armelf}.em. */
5317 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5319 struct elf32_arm_link_hash_table *globals;
5321 /* If we are only performing a partial link
5322 do not bother getting a bfd to hold the glue. */
5323 if (info->relocatable)
5326 /* Make sure we don't attach the glue sections to a dynamic object. */
5327 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5329 globals = elf32_arm_hash_table (info);
5331 BFD_ASSERT (globals != NULL);
5333 if (globals->bfd_of_glue_owner != NULL)
5336 /* Save the bfd for later use. */
5337 globals->bfd_of_glue_owner = abfd;
5343 check_use_blx (struct elf32_arm_link_hash_table *globals)
5345 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5347 globals->use_blx = 1;
5351 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5352 struct bfd_link_info *link_info)
5354 Elf_Internal_Shdr *symtab_hdr;
5355 Elf_Internal_Rela *internal_relocs = NULL;
5356 Elf_Internal_Rela *irel, *irelend;
5357 bfd_byte *contents = NULL;
5360 struct elf32_arm_link_hash_table *globals;
5362 /* If we are only performing a partial link do not bother
5363 to construct any glue. */
5364 if (link_info->relocatable)
5367 /* Here we have a bfd that is to be included on the link. We have a
5368 hook to do reloc rummaging, before section sizes are nailed down. */
5369 globals = elf32_arm_hash_table (link_info);
5371 BFD_ASSERT (globals != NULL);
5373 check_use_blx (globals);
5375 if (globals->byteswap_code && !bfd_big_endian (abfd))
5377 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5382 /* PR 5398: If we have not decided to include any loadable sections in
5383 the output then we will not have a glue owner bfd. This is OK, it
5384 just means that there is nothing else for us to do here. */
5385 if (globals->bfd_of_glue_owner == NULL)
5388 /* Rummage around all the relocs and map the glue vectors. */
5389 sec = abfd->sections;
5394 for (; sec != NULL; sec = sec->next)
5396 if (sec->reloc_count == 0)
5399 if ((sec->flags & SEC_EXCLUDE) != 0)
5402 symtab_hdr = & elf_symtab_hdr (abfd);
5404 /* Load the relocs. */
5406 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5408 if (internal_relocs == NULL)
5411 irelend = internal_relocs + sec->reloc_count;
5412 for (irel = internal_relocs; irel < irelend; irel++)
5415 unsigned long r_index;
5417 struct elf_link_hash_entry *h;
5419 r_type = ELF32_R_TYPE (irel->r_info);
5420 r_index = ELF32_R_SYM (irel->r_info);
5422 /* These are the only relocation types we care about. */
5423 if ( r_type != R_ARM_PC24
5424 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5427 /* Get the section contents if we haven't done so already. */
5428 if (contents == NULL)
5430 /* Get cached copy if it exists. */
5431 if (elf_section_data (sec)->this_hdr.contents != NULL)
5432 contents = elf_section_data (sec)->this_hdr.contents;
5435 /* Go get them off disk. */
5436 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5441 if (r_type == R_ARM_V4BX)
5445 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5446 record_arm_bx_glue (link_info, reg);
5450 /* If the relocation is not against a symbol it cannot concern us. */
5453 /* We don't care about local symbols. */
5454 if (r_index < symtab_hdr->sh_info)
5457 /* This is an external symbol. */
5458 r_index -= symtab_hdr->sh_info;
5459 h = (struct elf_link_hash_entry *)
5460 elf_sym_hashes (abfd)[r_index];
5462 /* If the relocation is against a static symbol it must be within
5463 the current section and so cannot be a cross ARM/Thumb relocation. */
5467 /* If the call will go through a PLT entry then we do not need glue. */
5469 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5475 /* This one is a call from arm code. We need to look up
5476 the target of the call. If it is a thumb target, we insert glue. */
5478 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5479 record_arm_to_thumb_glue (link_info, h);
5487 if (contents != NULL
5488 && elf_section_data (sec)->this_hdr.contents != contents)
5492 if (internal_relocs != NULL
5493 && elf_section_data (sec)->relocs != internal_relocs)
5494 free (internal_relocs);
5495 internal_relocs = NULL;
5501 if (contents != NULL
5502 && elf_section_data (sec)->this_hdr.contents != contents)
5504 if (internal_relocs != NULL
5505 && elf_section_data (sec)->relocs != internal_relocs)
5506 free (internal_relocs);
5513 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5516 bfd_elf32_arm_init_maps (bfd *abfd)
5518 Elf_Internal_Sym *isymbuf;
5519 Elf_Internal_Shdr *hdr;
5520 unsigned int i, localsyms;
5522 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5523 if (! is_arm_elf (abfd))
5526 if ((abfd->flags & DYNAMIC) != 0)
5529 hdr = & elf_symtab_hdr (abfd);
5530 localsyms = hdr->sh_info;
5532 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5533 should contain the number of local symbols, which should come before any
5534 global symbols. Mapping symbols are always local. */
5535 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5538 /* No internal symbols read? Skip this BFD. */
5539 if (isymbuf == NULL)
5542 for (i = 0; i < localsyms; i++)
5544 Elf_Internal_Sym *isym = &isymbuf[i];
5545 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5549 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5551 name = bfd_elf_string_from_elf_section (abfd,
5552 hdr->sh_link, isym->st_name);
5554 if (bfd_is_arm_special_symbol_name (name,
5555 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5556 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5562 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5563 say what they wanted. */
5566 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5568 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5569 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5571 if (globals->fix_cortex_a8 == -1)
5573 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5574 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5575 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5576 || out_attr[Tag_CPU_arch_profile].i == 0))
5577 globals->fix_cortex_a8 = 1;
5579 globals->fix_cortex_a8 = 0;
5585 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5587 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5588 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5590 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5591 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5593 switch (globals->vfp11_fix)
5595 case BFD_ARM_VFP11_FIX_DEFAULT:
5596 case BFD_ARM_VFP11_FIX_NONE:
5597 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5601 /* Give a warning, but do as the user requests anyway. */
5602 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5603 "workaround is not necessary for target architecture"), obfd);
5606 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5607 /* For earlier architectures, we might need the workaround, but do not
5608 enable it by default. If users are running with broken hardware, they
5609 must enable the erratum fix explicitly. */
5610 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5614 enum bfd_arm_vfp11_pipe
5622 /* Return a VFP register number. This is encoded as RX:X for single-precision
5623 registers, or X:RX for double-precision registers, where RX is the group of
5624 four bits in the instruction encoding and X is the single extension bit.
5625 RX and X fields are specified using their lowest (starting) bit. The return value is:
5628 0...31: single-precision registers s0...s31
5629 32...63: double-precision registers d0...d31.
5631 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5632 encounter VFP3 instructions, so we allow the full range for DP registers. */
5635 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5639 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5641 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
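/* Worked example (annotation added for clarity): with rx == 0 and
   x == 5, an instruction whose bits [3:0] are 0x3 and whose bit [5] is
   set yields (0x3 << 1) | 1 == 7, i.e. s7, in the single-precision
   case, and (0x3 | (1 << 4)) + 32 == 51, i.e. d19, in the
   double-precision case.  */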
5644 /* Set bits in *WMASK according to a register number REG as encoded by
5645 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5648 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5653 *wmask |= 3 << ((reg - 32) * 2);
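/* Example (annotation added for clarity): for d2 (REG == 34) the line
   above sets bits 4 and 5 of *WMASK, i.e. the two single-precision
   halves s4 and s5 of that double register.  */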
5656 /* Return TRUE if WMASK overwrites anything in REGS. */
5659 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5663 for (i = 0; i < numregs; i++)
5665 unsigned int reg = regs[i];
5667 if (reg < 32 && (wmask & (1 << reg)) != 0)
5675 if ((wmask & (3 << (reg * 2))) != 0)
5682 /* In this function, we're interested in two things: finding input registers
5683 for VFP data-processing instructions, and finding the set of registers which
5684 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5685 hold the written set, so FLDM etc. are easy to deal with (we're only
5686 interested in 32 SP registers or 16 DP registers, due to the VFP version
5687 implemented by the chip in question). DP registers are marked by setting
5688 both SP registers in the write mask. */
5690 static enum bfd_arm_vfp11_pipe
5691 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5694 enum bfd_arm_vfp11_pipe pipe = VFP11_BAD;
5695 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5697 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5700 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5701 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5703 pqrs = ((insn & 0x00800000) >> 20)
5704 | ((insn & 0x00300000) >> 19)
5705 | ((insn & 0x00000040) >> 6);
5709 case 0: /* fmac[sd]. */
5710 case 1: /* fnmac[sd]. */
5711 case 2: /* fmsc[sd]. */
5712 case 3: /* fnmsc[sd]. */
5714 bfd_arm_vfp11_write_mask (destmask, fd);
5716 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5721 case 4: /* fmul[sd]. */
5722 case 5: /* fnmul[sd]. */
5723 case 6: /* fadd[sd]. */
5724 case 7: /* fsub[sd]. */
5728 case 8: /* fdiv[sd]. */
5731 bfd_arm_vfp11_write_mask (destmask, fd);
5732 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5737 case 15: /* extended opcode. */
5739 unsigned int extn = ((insn >> 15) & 0x1e)
5740 | ((insn >> 7) & 1);
5744 case 0: /* fcpy[sd]. */
5745 case 1: /* fabs[sd]. */
5746 case 2: /* fneg[sd]. */
5747 case 8: /* fcmp[sd]. */
5748 case 9: /* fcmpe[sd]. */
5749 case 10: /* fcmpz[sd]. */
5750 case 11: /* fcmpez[sd]. */
5751 case 16: /* fuito[sd]. */
5752 case 17: /* fsito[sd]. */
5753 case 24: /* ftoui[sd]. */
5754 case 25: /* ftouiz[sd]. */
5755 case 26: /* ftosi[sd]. */
5756 case 27: /* ftosiz[sd]. */
5757 /* These instructions will not bounce due to underflow. */
5762 case 3: /* fsqrt[sd]. */
5763 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5764 registers to cause the erratum in previous instructions. */
5765 bfd_arm_vfp11_write_mask (destmask, fd);
5769 case 15: /* fcvt{ds,sd}. */
5773 bfd_arm_vfp11_write_mask (destmask, fd);
5775 /* Only FCVTSD can underflow. */
5776 if ((insn & 0x100) != 0)
5795 /* Two-register transfer. */
5796 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5798 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5800 if ((insn & 0x100000) == 0)
5803 bfd_arm_vfp11_write_mask (destmask, fm);
5806 bfd_arm_vfp11_write_mask (destmask, fm);
5807 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5813 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5815 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5816 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5820 case 0: /* Two-reg transfer. We should catch these above. */
5823 case 2: /* fldm[sdx]. */
5827 unsigned int i, offset = insn & 0xff;
5832 for (i = fd; i < fd + offset; i++)
5833 bfd_arm_vfp11_write_mask (destmask, i);
5837 case 4: /* fld[sd]. */
5839 bfd_arm_vfp11_write_mask (destmask, fd);
5848 /* Single-register transfer. Note L==0. */
5849 else if ((insn & 0x0f100e10) == 0x0e000a10)
5851 unsigned int opcode = (insn >> 21) & 7;
5852 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5856 case 0: /* fmsr/fmdlr. */
5857 case 1: /* fmdhr. */
5858 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5859 destination register. I don't know if this is exactly right,
5860 but it is the conservative choice. */
5861 bfd_arm_vfp11_write_mask (destmask, fn);
5875 static int elf32_arm_compare_mapping (const void * a, const void * b);
5878 /* Look for potentially-troublesome code sequences which might trigger the
5879 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5880 (available from ARM) for details of the erratum. A short version is
5881 described in ld.texinfo. */
5884 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5887 bfd_byte *contents = NULL;
5889 int regs[3], numregs = 0;
5890 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5891 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5893 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5894 The states transition as follows:
5896 0 -> 1 (vector) or 0 -> 2 (scalar)
5897 A VFP FMAC-pipeline instruction has been seen. Fill
5898 regs[0]..regs[numregs-1] with its input operands. Remember this
5899 instruction in 'first_fmac'.
5902 1 -> 2: Any instruction, except for a VFP instruction which overwrites regs[*].
5907 1 -> 3 or 2 -> 3: A VFP instruction has been seen which overwrites any of regs[*].
5908 We must make a veneer! Reset state to 0 before examining the next instruction.
5912 If we fail to match anything in state 2, reset to state 0 and reset
5913 the instruction pointer to the instruction after 'first_fmac'.
5915 If the VFP11 vector mode is in use, there must be at least two unrelated
5916 instructions between anti-dependent VFP11 instructions to properly avoid
5917 triggering the erratum, hence the use of the extra state 1. */
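/* Illustrative sequence (annotation added for clarity; pre-UAL VFP
   mnemonics, scalar case with use_vector == 0):

       fmacs  s0, s1, s2    @ FMAC-pipeline insn; inputs s0, s1, s2
       flds   s1, [r0]      @ overwrites one of those inputs

   matches 0 -> 2 -> 3 above and therefore gets a veneer.  In vector
   mode the extra state 1 widens the window so that an overwrite in
   either of the two following instructions is caught.  */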
5919 /* If we are only performing a partial link do not bother
5920 to construct any glue. */
5921 if (link_info->relocatable)
5924 /* Skip if this bfd does not correspond to an ELF image. */
5925 if (! is_arm_elf (abfd))
5928 /* We should have chosen a fix type by the time we get here. */
5929 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5931 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5934 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5935 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5938 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5940 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5941 struct _arm_elf_section_data *sec_data;
5943 /* If we don't have executable progbits, we're not interested in this
5944 section. Also skip if section is to be excluded. */
5945 if (elf_section_type (sec) != SHT_PROGBITS
5946 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5947 || (sec->flags & SEC_EXCLUDE) != 0
5948 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5949 || sec->output_section == bfd_abs_section_ptr
5950 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5953 sec_data = elf32_arm_section_data (sec);
5955 if (sec_data->mapcount == 0)
5958 if (elf_section_data (sec)->this_hdr.contents != NULL)
5959 contents = elf_section_data (sec)->this_hdr.contents;
5960 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5963 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
5964 elf32_arm_compare_mapping);
5966 for (span = 0; span < sec_data->mapcount; span++)
5968 unsigned int span_start = sec_data->map[span].vma;
5969 unsigned int span_end = (span == sec_data->mapcount - 1)
5970 ? sec->size : sec_data->map[span + 1].vma;
5971 char span_type = sec_data->map[span].type;
5973 /* FIXME: Only ARM mode is supported at present. We may need to
5974 support Thumb-2 mode also at some point. */
5975 if (span_type != 'a')
5978 for (i = span_start; i < span_end;)
5980 unsigned int next_i = i + 4;
5981 unsigned int insn = bfd_big_endian (abfd)
5982 ? (contents[i] << 24)
5983 | (contents[i + 1] << 16)
5984 | (contents[i + 2] << 8)
5986 : (contents[i + 3] << 24)
5987 | (contents[i + 2] << 16)
5988 | (contents[i + 1] << 8)
5990 unsigned int writemask = 0;
5991 enum bfd_arm_vfp11_pipe pipe;
5996 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
5998 /* I'm assuming the VFP11 erratum can trigger with denorm
5999 operands on either the FMAC or the DS pipeline. This might
6000 lead to slightly overenthusiastic veneer insertion. */
6001 if (pipe == VFP11_FMAC || pipe == VFP11_DS)
6003 state = use_vector ? 1 : 2;
6005 veneer_of_insn = insn;
6011 int other_regs[3], other_numregs;
6012 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6015 if (pipe != VFP11_BAD
6016 && bfd_arm_vfp11_antidependency (writemask, regs,
6026 int other_regs[3], other_numregs;
6027 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6030 if (pipe != VFP11_BAD
6031 && bfd_arm_vfp11_antidependency (writemask, regs,
6037 next_i = first_fmac + 4;
6043 abort (); /* Should be unreachable. */
6048 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
6049 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6052 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
6054 newerr->u.b.vfp_insn = veneer_of_insn;
6059 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6066 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6071 newerr->next = sec_data->erratumlist;
6072 sec_data->erratumlist = newerr;
6081 if (contents != NULL
6082 && elf_section_data (sec)->this_hdr.contents != contents)
6090 if (contents != NULL
6091 && elf_section_data (sec)->this_hdr.contents != contents)
6097 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6098 after sections have been laid out, using specially-named symbols. */
6101 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6102 struct bfd_link_info *link_info)
6105 struct elf32_arm_link_hash_table *globals;
6108 if (link_info->relocatable)
6111 /* Skip if this bfd does not correspond to an ELF image. */
6112 if (! is_arm_elf (abfd))
6115 globals = elf32_arm_hash_table (link_info);
6117 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6118 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6120 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6122 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6123 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6125 for (; errnode != NULL; errnode = errnode->next)
6127 struct elf_link_hash_entry *myh;
6130 switch (errnode->type)
6132 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6133 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6134 /* Find veneer symbol. */
6135 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6136 errnode->u.b.veneer->u.v.id);
6138 myh = elf_link_hash_lookup
6139 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6142 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6143 "`%s'"), abfd, tmp_name);
6145 vma = myh->root.u.def.section->output_section->vma
6146 + myh->root.u.def.section->output_offset
6147 + myh->root.u.def.value;
6149 errnode->u.b.veneer->vma = vma;
6152 case VFP11_ERRATUM_ARM_VENEER:
6153 case VFP11_ERRATUM_THUMB_VENEER:
6154 /* Find return location. */
6155 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6158 myh = elf_link_hash_lookup
6159 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6162 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6163 "`%s'"), abfd, tmp_name);
6165 vma = myh->root.u.def.section->output_section->vma
6166 + myh->root.u.def.section->output_offset
6167 + myh->root.u.def.value;
6169 errnode->u.v.branch->vma = vma;
6182 /* Set target relocation values needed during linking. */
6185 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6186 struct bfd_link_info *link_info,
6188 char * target2_type,
6191 bfd_arm_vfp11_fix vfp11_fix,
6192 int no_enum_warn, int no_wchar_warn,
6193 int pic_veneer, int fix_cortex_a8)
6195 struct elf32_arm_link_hash_table *globals;
6197 globals = elf32_arm_hash_table (link_info);
6199 globals->target1_is_rel = target1_is_rel;
6200 if (strcmp (target2_type, "rel") == 0)
6201 globals->target2_reloc = R_ARM_REL32;
6202 else if (strcmp (target2_type, "abs") == 0)
6203 globals->target2_reloc = R_ARM_ABS32;
6204 else if (strcmp (target2_type, "got-rel") == 0)
6205 globals->target2_reloc = R_ARM_GOT_PREL;
6208 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6211 globals->fix_v4bx = fix_v4bx;
6212 globals->use_blx |= use_blx;
6213 globals->vfp11_fix = vfp11_fix;
6214 globals->pic_veneer = pic_veneer;
6215 globals->fix_cortex_a8 = fix_cortex_a8;
6217 BFD_ASSERT (is_arm_elf (output_bfd));
6218 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6219 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
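/* Annotation added for clarity: these parameters are normally driven
   from ld's ARM emulation (ld/emultempl/armelf.em) by command-line
   options such as --target1-rel/--target1-abs, --target2=<rel|abs|got-rel>,
   --fix-v4bx, --use-blx, --vfp11-denorm-fix=<none|scalar|vector>,
   --pic-veneer, --no-enum-size-warning, --no-wchar-size-warning and
   --fix-cortex-a8.  */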
6222 /* Replace the target offset of a Thumb bl or b.w instruction. */
6225 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6231 BFD_ASSERT ((offset & 1) == 0);
6233 upper = bfd_get_16 (abfd, insn);
6234 lower = bfd_get_16 (abfd, insn + 2);
6235 reloc_sign = (offset < 0) ? 1 : 0;
6236 upper = (upper & ~(bfd_vma) 0x7ff)
6237 | ((offset >> 12) & 0x3ff)
6238 | (reloc_sign << 10);
6239 lower = (lower & ~(bfd_vma) 0x2fff)
6240 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6241 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6242 | ((offset >> 1) & 0x7ff);
6243 bfd_put_16 (abfd, upper, insn);
6244 bfd_put_16 (abfd, lower, insn + 2);
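/* Annotation added for clarity: this is the inverse of the Thumb-2
   branch decoding performed in the Cortex-A8 scan above; OFFSET is
   split back into imm10 (upper halfword), imm11 (lower halfword) and
   the J1/J2 bits, where J1 = NOT(I1) XOR S and J2 = NOT(I2) XOR S with
   S the sign bit of the offset.  */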
6247 /* Thumb code calling an ARM function. */
6250 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6254 asection * input_section,
6255 bfd_byte * hit_data,
6258 bfd_signed_vma addend,
6260 char **error_message)
6264 long int ret_offset;
6265 struct elf_link_hash_entry * myh;
6266 struct elf32_arm_link_hash_table * globals;
6268 myh = find_thumb_glue (info, name, error_message);
6272 globals = elf32_arm_hash_table (info);
6274 BFD_ASSERT (globals != NULL);
6275 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6277 my_offset = myh->root.u.def.value;
6279 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6280 THUMB2ARM_GLUE_SECTION_NAME);
6282 BFD_ASSERT (s != NULL);
6283 BFD_ASSERT (s->contents != NULL);
6284 BFD_ASSERT (s->output_section != NULL);
6286 if ((my_offset & 0x01) == 0x01)
6289 && sym_sec->owner != NULL
6290 && !INTERWORK_FLAG (sym_sec->owner))
6292 (*_bfd_error_handler)
6293 (_("%B(%s): warning: interworking not enabled.\n"
6294 " first occurrence: %B: thumb call to arm"),
6295 sym_sec->owner, input_bfd, name);
6301 myh->root.u.def.value = my_offset;
6303 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6304 s->contents + my_offset);
6306 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6307 s->contents + my_offset + 2);
6310 /* Address of destination of the stub. */
6311 ((bfd_signed_vma) val)
6313 /* Offset from the start of the current section
6314 to the start of the stubs. */
6316 /* Offset of the start of this stub from the start of the stubs. */
6318 /* Address of the start of the current section. */
6319 + s->output_section->vma)
6320 /* The branch instruction is 4 bytes into the stub. */
6322 /* ARM branches work from the pc of the instruction + 8. */
6325 put_arm_insn (globals, output_bfd,
6326 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6327 s->contents + my_offset + 4);
6330 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6332 /* Now go back and fix up the original BL insn to point to here. */
6334 /* Address of where the stub is located. */
6335 (s->output_section->vma + s->output_offset + my_offset)
6336 /* Address of where the BL is located. */
6337 - (input_section->output_section->vma + input_section->output_offset
6339 /* Addend in the relocation. */
6341 /* Biassing for PC-relative addressing. */
6344 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6349 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6351 static struct elf_link_hash_entry *
6352 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6359 char ** error_message)
6362 long int ret_offset;
6363 struct elf_link_hash_entry * myh;
6364 struct elf32_arm_link_hash_table * globals;
6366 myh = find_arm_glue (info, name, error_message);
6370 globals = elf32_arm_hash_table (info);
6372 BFD_ASSERT (globals != NULL);
6373 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6375 my_offset = myh->root.u.def.value;
6377 if ((my_offset & 0x01) == 0x01)
6380 && sym_sec->owner != NULL
6381 && !INTERWORK_FLAG (sym_sec->owner))
6383 (*_bfd_error_handler)
6384 (_("%B(%s): warning: interworking not enabled.\n"
6385 " first occurrence: %B: arm call to thumb"),
6386 sym_sec->owner, input_bfd, name);
6390 myh->root.u.def.value = my_offset;
6392 if (info->shared || globals->root.is_relocatable_executable
6393 || globals->pic_veneer)
6395 /* For relocatable objects we can't use absolute addresses,
6396 so construct the address from a relative offset. */
6397 /* TODO: If the offset is small it's probably worth
6398 constructing the address with adds. */
6399 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6400 s->contents + my_offset);
6401 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6402 s->contents + my_offset + 4);
6403 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6404 s->contents + my_offset + 8);
6405 /* Adjust the offset by 4 for the position of the add,
6406 and 8 for the pipeline offset. */
6407 ret_offset = (val - (s->output_offset
6408 + s->output_section->vma
6411 bfd_put_32 (output_bfd, ret_offset,
6412 s->contents + my_offset + 12);
6414 else if (globals->use_blx)
6416 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6417 s->contents + my_offset);
6419 /* It's a thumb address. Add the low order bit. */
6420 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6421 s->contents + my_offset + 4);
6425 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6426 s->contents + my_offset);
6428 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6429 s->contents + my_offset + 4);
6431 /* It's a thumb address. Add the low order bit. */
6432 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6433 s->contents + my_offset + 8);
6439 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6444 /* Arm code calling a Thumb function. */
6447 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6451 asection * input_section,
6452 bfd_byte * hit_data,
6455 bfd_signed_vma addend,
6457 char **error_message)
6459 unsigned long int tmp;
6462 long int ret_offset;
6463 struct elf_link_hash_entry * myh;
6464 struct elf32_arm_link_hash_table * globals;
6466 globals = elf32_arm_hash_table (info);
6468 BFD_ASSERT (globals != NULL);
6469 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6471 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6472 ARM2THUMB_GLUE_SECTION_NAME);
6473 BFD_ASSERT (s != NULL);
6474 BFD_ASSERT (s->contents != NULL);
6475 BFD_ASSERT (s->output_section != NULL);
6477 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6478 sym_sec, val, s, error_message);
6482 my_offset = myh->root.u.def.value;
6483 tmp = bfd_get_32 (input_bfd, hit_data);
6484 tmp = tmp & 0xFF000000;
6486 /* Somehow these are both 4 too far, so subtract 8. */
6487 ret_offset = (s->output_offset
6489 + s->output_section->vma
6490 - (input_section->output_offset
6491 + input_section->output_section->vma
6495 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6497 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6502 /* Populate Arm stub for an exported Thumb function. */
6505 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6507 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6509 struct elf_link_hash_entry * myh;
6510 struct elf32_arm_link_hash_entry *eh;
6511 struct elf32_arm_link_hash_table * globals;
6514 char *error_message;
6516 eh = elf32_arm_hash_entry (h);
6517 /* Allocate stubs for exported Thumb functions on v4t. */
6518 if (eh->export_glue == NULL)
6521 globals = elf32_arm_hash_table (info);
6523 BFD_ASSERT (globals != NULL);
6524 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6526 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6527 ARM2THUMB_GLUE_SECTION_NAME);
6528 BFD_ASSERT (s != NULL);
6529 BFD_ASSERT (s->contents != NULL);
6530 BFD_ASSERT (s->output_section != NULL);
6532 sec = eh->export_glue->root.u.def.section;
6534 BFD_ASSERT (sec->output_section != NULL);
6536 val = eh->export_glue->root.u.def.value + sec->output_offset
6537 + sec->output_section->vma;
6539 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6540 h->root.u.def.section->owner,
6541 globals->obfd, sec, val, s,
6547 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6550 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6555 struct elf32_arm_link_hash_table *globals;
6557 globals = elf32_arm_hash_table (info);
6559 BFD_ASSERT (globals != NULL);
6560 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6562 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6563 ARM_BX_GLUE_SECTION_NAME);
6564 BFD_ASSERT (s != NULL);
6565 BFD_ASSERT (s->contents != NULL);
6566 BFD_ASSERT (s->output_section != NULL);
6568 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6570 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6572 if ((globals->bx_glue_offset[reg] & 1) == 0)
6574 p = s->contents + glue_addr;
6575 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6576 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6577 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6578 globals->bx_glue_offset[reg] |= 1;
6581 return glue_addr + s->output_section->vma + s->output_offset;
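/* Illustrative note (not in the original sources): bx_glue_offset[reg]
   packs three things into one word.  Bit 1 means a veneer has been
   reserved for this register, bit 0 means its contents have been written,
   and the remaining bits give its offset within the glue section.  For
   example, a stored value of 0x26 describes an unwritten veneer at
   section offset 0x24; after the three instructions above are emitted the
   value becomes 0x27, so a later call for the same register only returns
   the veneer's address.  */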
6584 /* Generate Arm stubs for exported Thumb symbols. */
6586 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6587 struct bfd_link_info *link_info)
6589 struct elf32_arm_link_hash_table * globals;
6591 if (link_info == NULL)
6592 /* Ignore this if we are not called by the ELF backend linker. */
6595 globals = elf32_arm_hash_table (link_info);
6596 /* If blx is available then exported Thumb symbols are OK and there is
6598 if (globals->use_blx)
6601 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6605 /* Some relocations map to different relocations depending on the
6606 target. Return the real relocation. */
6609 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6615 if (globals->target1_is_rel)
6621 return globals->target2_reloc;
6628 /* Return the base VMA address which should be subtracted from real addresses
6629 when resolving @dtpoff relocation.
6630 This is PT_TLS segment p_vaddr. */
6633 dtpoff_base (struct bfd_link_info *info)
6635 /* If tls_sec is NULL, we should have signalled an error already. */
6636 if (elf_hash_table (info)->tls_sec == NULL)
6638 return elf_hash_table (info)->tls_sec->vma;
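/* Example (illustrative only): if the PT_TLS segment is placed at VMA
   0x11000, dtpoff_base returns 0x11000, so an R_ARM_TLS_LDO32 reference
   to a thread-local variable at 0x11010 resolves to the DTP offset 0x10
   (see the R_ARM_TLS_LDO32 case below).  */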
6641 /* Return the relocation value for @tpoff relocation
6642 if STT_TLS virtual address is ADDRESS. */
6645 tpoff (struct bfd_link_info *info, bfd_vma address)
6647 struct elf_link_hash_table *htab = elf_hash_table (info);
6650 /* If tls_sec is NULL, we should have signalled an error already. */
6651 if (htab->tls_sec == NULL)
6653 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6654 return address - htab->tls_sec->vma + base;
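/* Worked example (illustrative, assuming TCB_SIZE is 8, the ARM EABI
   thread control block size): with tls_sec->vma == 0x11000 and
   alignment_power == 2, base is align_power (8, 2) == 8, so a
   thread-local variable at 0x11010 gets the TP offset
   0x11010 - 0x11000 + 8 = 0x18.  A larger TLS section alignment simply
   pushes the block further from the thread pointer.  */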
6657 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6658 VALUE is the relocation value. */
6660 static bfd_reloc_status_type
6661 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6664 return bfd_reloc_overflow;
6666 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6667 bfd_put_32 (abfd, value, data);
6668 return bfd_reloc_ok;
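/* Illustrative example (not in the original sources): for a load such as
   0xe59f1000 (ldr r1, [pc]) and VALUE 0x14, the low twelve bits are
   replaced to give 0xe59f1014 while the upper twenty bits of the
   instruction word are preserved.  Values that do not fit in twelve bits
   are rejected with bfd_reloc_overflow above.  */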
6671 /* For a given value of n, calculate the value of G_n as required to
6672 deal with group relocations. We return it in the form of an
6673 encoded constant-and-rotation, together with the final residual. If n is
6674 specified as less than zero, then final_residual is filled with the
6675 input value and no further action is performed. */
6678 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6682 bfd_vma encoded_g_n = 0;
6683 bfd_vma residual = value; /* Also known as Y_n. */
6685 for (current_n = 0; current_n <= n; current_n++)
6689 /* Calculate which part of the value to mask. */
6696 /* Determine the most significant bit in the residual and
6697 align the resulting value to a 2-bit boundary. */
6698 for (msb = 30; msb >= 0; msb -= 2)
6699 if (residual & (3 << msb))
6702 /* The desired shift is now (msb - 6), or zero, whichever
6709 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6710 g_n = residual & (0xff << shift);
6711 encoded_g_n = (g_n >> shift)
6712 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6714 /* Calculate the residual for the next time around. */
6718 *final_residual = residual;
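/* Worked example (illustrative, not in the original sources): for
   VALUE == 0x12345 the successive groups are

     G_0 = 0x12000  (0x48 rotated right by 22), residual 0x345
     G_1 = 0x00344,                             residual 0x001
     G_2 = 0x00001,                             residual 0x000

   so a group-0 relocation encodes 0x12000, a group-1 relocation the next
   8-bit chunk, and so on; any non-zero final residual is reported as an
   overflow by the callers below.  */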
6723 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6724 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6727 identify_add_or_sub (bfd_vma insn)
6729 int opcode = insn & 0x1e00000;
6731 if (opcode == 1 << 23) /* ADD */
6734 if (opcode == 1 << 22) /* SUB */
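/* Example (illustrative): the data-processing opcode lives in bits 21-24,
   so "add r12, pc, #4" (0xe28fc004) yields 0x00800000 == 1 << 23 and is
   classified as an ADD, while "sub r12, pc, #4" (0xe24fc004) yields
   1 << 22 and is classified as a SUB.  Anything else returns 0 and is
   rejected by the group-relocation code below.  */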
6740 /* Perform a relocation as part of a final link. */
6742 static bfd_reloc_status_type
6743 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6746 asection * input_section,
6747 bfd_byte * contents,
6748 Elf_Internal_Rela * rel,
6750 struct bfd_link_info * info,
6752 const char * sym_name,
6754 struct elf_link_hash_entry * h,
6755 bfd_boolean * unresolved_reloc_p,
6756 char ** error_message)
6758 unsigned long r_type = howto->type;
6759 unsigned long r_symndx;
6760 bfd_byte * hit_data = contents + rel->r_offset;
6761 bfd * dynobj = NULL;
6762 Elf_Internal_Shdr * symtab_hdr;
6763 struct elf_link_hash_entry ** sym_hashes;
6764 bfd_vma * local_got_offsets;
6765 asection * sgot = NULL;
6766 asection * splt = NULL;
6767 asection * sreloc = NULL;
6769 bfd_signed_vma signed_addend;
6770 struct elf32_arm_link_hash_table * globals;
6772 globals = elf32_arm_hash_table (info);
6774 BFD_ASSERT (is_arm_elf (input_bfd));
6776 /* Some relocation types map to different relocations depending on the
6777 target. We pick the right one here. */
6778 r_type = arm_real_reloc_type (globals, r_type);
6779 if (r_type != howto->type)
6780 howto = elf32_arm_howto_from_type (r_type);
6782 /* If the start address has been set, then set the EF_ARM_HASENTRY
6783 flag. Setting this more than once is redundant, but the cost is
6784 not too high, and it keeps the code simple.
6786 The test is done here, rather than somewhere else, because the
6787 start address is only set just before the final link commences.
6789 Note - if the user deliberately sets a start address of 0, the
6790 flag will not be set. */
6791 if (bfd_get_start_address (output_bfd) != 0)
6792 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6794 dynobj = elf_hash_table (info)->dynobj;
6797 sgot = bfd_get_section_by_name (dynobj, ".got");
6798 splt = bfd_get_section_by_name (dynobj, ".plt");
6800 symtab_hdr = & elf_symtab_hdr (input_bfd);
6801 sym_hashes = elf_sym_hashes (input_bfd);
6802 local_got_offsets = elf_local_got_offsets (input_bfd);
6803 r_symndx = ELF32_R_SYM (rel->r_info);
6805 if (globals->use_rel)
6807 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6809 if (addend & ((howto->src_mask + 1) >> 1))
6812 signed_addend &= ~ howto->src_mask;
6813 signed_addend |= addend;
6816 signed_addend = addend;
6819 addend = signed_addend = rel->r_addend;
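/* Worked example (illustrative): for a REL R_ARM_PC24 relocation, whose
   src_mask is 0x00ffffff, a stored field of 0xfffffe has its top bit
   (0x800000) set, so it is sign-extended to the signed addend -2.  Since
   the field counts words, this is the usual -8 byte adjustment once it is
   scaled by howto->size further down.  */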
6824 /* We don't need to find a value for this symbol. It's just a
6826 *unresolved_reloc_p = FALSE;
6827 return bfd_reloc_ok;
6830 if (!globals->vxworks_p)
6831 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6835 case R_ARM_ABS32_NOI:
6837 case R_ARM_REL32_NOI:
6843 /* Handle relocations which should use the PLT entry. ABS32/REL32
6844 will use the symbol's value, which may point to a PLT entry, but we
6845 don't need to handle that here. If we created a PLT entry, all
6846 branches in this object should go to it, except if the PLT is too
6847 far away, in which case a long branch stub should be inserted. */
6848 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6849 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6850 && r_type != R_ARM_CALL
6851 && r_type != R_ARM_JUMP24
6852 && r_type != R_ARM_PLT32)
6855 && h->plt.offset != (bfd_vma) -1)
6857 /* If we've created a .plt section, and assigned a PLT entry to
6858 this function, it should not be known to bind locally. If
6859 it were, we would have cleared the PLT entry. */
6860 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6862 value = (splt->output_section->vma
6863 + splt->output_offset
6865 *unresolved_reloc_p = FALSE;
6866 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6867 contents, rel->r_offset, value,
6871 /* When generating a shared object or relocatable executable, these
6872 relocations are copied into the output file to be resolved at
6874 if ((info->shared || globals->root.is_relocatable_executable)
6875 && (input_section->flags & SEC_ALLOC)
6876 && !(elf32_arm_hash_table (info)->vxworks_p
6877 && strcmp (input_section->output_section->name,
6879 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6880 || !SYMBOL_CALLS_LOCAL (info, h))
6882 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6883 || h->root.type != bfd_link_hash_undefweak)
6884 && r_type != R_ARM_PC24
6885 && r_type != R_ARM_CALL
6886 && r_type != R_ARM_JUMP24
6887 && r_type != R_ARM_PREL31
6888 && r_type != R_ARM_PLT32)
6890 Elf_Internal_Rela outrel;
6892 bfd_boolean skip, relocate;
6894 *unresolved_reloc_p = FALSE;
6898 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6899 ! globals->use_rel);
6902 return bfd_reloc_notsupported;
6908 outrel.r_addend = addend;
6910 _bfd_elf_section_offset (output_bfd, info, input_section,
6912 if (outrel.r_offset == (bfd_vma) -1)
6914 else if (outrel.r_offset == (bfd_vma) -2)
6915 skip = TRUE, relocate = TRUE;
6916 outrel.r_offset += (input_section->output_section->vma
6917 + input_section->output_offset);
6920 memset (&outrel, 0, sizeof outrel);
6925 || !h->def_regular))
6926 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6931 /* This symbol is local, or marked to become local. */
6932 if (sym_flags == STT_ARM_TFUNC)
6934 if (globals->symbian_p)
6938 /* On Symbian OS, the data segment and text segment
6939 can be relocated independently. Therefore, we
6940 must indicate the segment to which this
6941 relocation is relative. The BPABI allows us to
6942 use any symbol in the right segment; we just use
6943 the section symbol as it is convenient. (We
6944 cannot use the symbol given by "h" directly as it
6945 will not appear in the dynamic symbol table.)
6947 Note that the dynamic linker ignores the section
6948 symbol value, so we don't subtract osec->vma
6949 from the emitted reloc addend. */
6951 osec = sym_sec->output_section;
6953 osec = input_section->output_section;
6954 symbol = elf_section_data (osec)->dynindx;
6957 struct elf_link_hash_table *htab = elf_hash_table (info);
6959 if ((osec->flags & SEC_READONLY) == 0
6960 && htab->data_index_section != NULL)
6961 osec = htab->data_index_section;
6963 osec = htab->text_index_section;
6964 symbol = elf_section_data (osec)->dynindx;
6966 BFD_ASSERT (symbol != 0);
6969 /* On SVR4-ish systems, the dynamic loader cannot
6970 relocate the text and data segments independently,
6971 so the symbol does not matter. */
6973 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
6974 if (globals->use_rel)
6977 outrel.r_addend += value;
6980 loc = sreloc->contents;
6981 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
6982 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
6984 /* If this reloc is against an external symbol, we do not want to
6985 fiddle with the addend. Otherwise, we need to include the symbol
6986 value so that it becomes an addend for the dynamic reloc. */
6988 return bfd_reloc_ok;
6990 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6991 contents, rel->r_offset, value,
6994 else switch (r_type)
6997 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6999 case R_ARM_XPC25: /* Arm BLX instruction. */
7002 case R_ARM_PC24: /* Arm B/BL instruction. */
7005 bfd_signed_vma branch_offset;
7006 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7008 if (r_type == R_ARM_XPC25)
7010 /* Check for Arm calling Arm function. */
7011 /* FIXME: Should we translate the instruction into a BL
7012 instruction instead? */
7013 if (sym_flags != STT_ARM_TFUNC)
7014 (*_bfd_error_handler)
7015 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7017 h ? h->root.root.string : "(local)");
7019 else if (r_type == R_ARM_PC24)
7021 /* Check for Arm calling Thumb function. */
7022 if (sym_flags == STT_ARM_TFUNC)
7024 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7025 output_bfd, input_section,
7026 hit_data, sym_sec, rel->r_offset,
7027 signed_addend, value,
7029 return bfd_reloc_ok;
7031 return bfd_reloc_dangerous;
7035 /* Check if a stub has to be inserted because the
7036 destination is too far or we are changing mode. */
7037 if ( r_type == R_ARM_CALL
7038 || r_type == R_ARM_JUMP24
7039 || r_type == R_ARM_PLT32)
7043 /* If the call goes through a PLT entry, make sure to
7044 check distance to the right destination address. */
7045 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7047 value = (splt->output_section->vma
7048 + splt->output_offset
7050 *unresolved_reloc_p = FALSE;
7051 /* The PLT entry is in ARM mode, regardless of the
7053 sym_flags = STT_FUNC;
7056 from = (input_section->output_section->vma
7057 + input_section->output_offset
7059 branch_offset = (bfd_signed_vma)(value - from);
7061 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
7062 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
7063 || ((sym_flags == STT_ARM_TFUNC)
7064 && (((r_type == R_ARM_CALL) && !globals->use_blx)
7065 || (r_type == R_ARM_JUMP24)
7066 || (r_type == R_ARM_PLT32) ))
7069 /* The target is out of reach, so redirect the
7070 branch to the local stub for this function. */
7072 stub_entry = elf32_arm_get_stub_entry (input_section,
7075 if (stub_entry != NULL)
7076 value = (stub_entry->stub_offset
7077 + stub_entry->stub_sec->output_offset
7078 + stub_entry->stub_sec->output_section->vma);
7082 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7084 S is the address of the symbol in the relocation.
7085 P is address of the instruction being relocated.
7086 A is the addend (extracted from the instruction) in bytes.
7088 S is held in 'value'.
7089 P is the base address of the section containing the
7090 instruction plus the offset of the reloc into that
7092 (input_section->output_section->vma +
7093 input_section->output_offset +
7095 A is the addend, converted into bytes, ie:
7098 Note: None of these operations have knowledge of the pipeline
7099 size of the processor, thus it is up to the assembler to
7100 encode this information into the addend. */
7101 value -= (input_section->output_section->vma
7102 + input_section->output_offset);
7103 value -= rel->r_offset;
7104 if (globals->use_rel)
7105 value += (signed_addend << howto->size);
7107 /* RELA addends do not have to be adjusted by howto->size. */
7108 value += signed_addend;
7110 signed_addend = value;
7111 signed_addend >>= howto->rightshift;
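/* Worked example (illustrative, not in the original sources): a REL
   R_ARM_JUMP24 at P == 0x8100 (section VMA 0x8000, r_offset 0x100) whose
   target S is 0x8200 and whose stored field is 0xfffffe (A == -8 bytes):
   value starts as 0x8200, the two subtractions leave 0x100, adding the
   scaled addend gives 0xf8, and shifting right by howto->rightshift
   leaves 0x3e.  The processor later computes
   0x8100 + 8 + (0x3e << 2) == 0x8200, the intended target.  */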
7113 /* A branch to an undefined weak symbol is turned into a jump to
7114 the next instruction unless a PLT entry will be created.
7115 Do the same for local undefined symbols.
7116 The jump to the next instruction is optimized as a NOP depending
7117 on the architecture. */
7118 if (h ? (h->root.type == bfd_link_hash_undefweak
7119 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7120 : bfd_is_und_section (sym_sec))
7122 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7124 if (arch_has_arm_nop (globals))
7125 value |= 0x0320f000;
7127 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
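/* Example (illustrative): a BL such as 0xeb000000 aimed at an undefined
   weak symbol keeps only its condition field (0xe0000000) and becomes
   0xe320f000 (NOP) on architectures that have the ARM NOP hint, or
   0xe1a00000 (mov r0, r0) otherwise, so execution simply falls through
   to the next instruction.  */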
7131 /* Perform a signed range check. */
7132 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7133 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7134 return bfd_reloc_overflow;
7136 addend = (value & 2);
7138 value = (signed_addend & howto->dst_mask)
7139 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7141 if (r_type == R_ARM_CALL)
7143 /* Set the H bit in the BLX instruction. */
7144 if (sym_flags == STT_ARM_TFUNC)
7149 value &= ~(bfd_vma)(1 << 24);
7152 /* Select the correct instruction (BL or BLX). */
7153 /* Only if we are not handling a BL to a stub. In this
7154 case, mode switching is performed by the stub. */
7155 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7159 value &= ~(bfd_vma)(1 << 28);
7169 if (sym_flags == STT_ARM_TFUNC)
7173 case R_ARM_ABS32_NOI:
7179 if (sym_flags == STT_ARM_TFUNC)
7181 value -= (input_section->output_section->vma
7182 + input_section->output_offset + rel->r_offset);
7185 case R_ARM_REL32_NOI:
7187 value -= (input_section->output_section->vma
7188 + input_section->output_offset + rel->r_offset);
7192 value -= (input_section->output_section->vma
7193 + input_section->output_offset + rel->r_offset);
7194 value += signed_addend;
7195 if (! h || h->root.type != bfd_link_hash_undefweak)
7197 /* Check for overflow. */
7198 if ((value ^ (value >> 1)) & (1 << 30))
7199 return bfd_reloc_overflow;
7201 value &= 0x7fffffff;
7202 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7203 if (sym_flags == STT_ARM_TFUNC)
7208 bfd_put_32 (input_bfd, value, hit_data);
7209 return bfd_reloc_ok;
7213 if ((long) value > 0x7f || (long) value < -0x80)
7214 return bfd_reloc_overflow;
7216 bfd_put_8 (input_bfd, value, hit_data);
7217 return bfd_reloc_ok;
7222 if ((long) value > 0x7fff || (long) value < -0x8000)
7223 return bfd_reloc_overflow;
7225 bfd_put_16 (input_bfd, value, hit_data);
7226 return bfd_reloc_ok;
7228 case R_ARM_THM_ABS5:
7229 /* Support ldr and str instructions for the thumb. */
7230 if (globals->use_rel)
7232 /* Need to refetch addend. */
7233 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7234 /* ??? Need to determine shift amount from operand size. */
7235 addend >>= howto->rightshift;
7239 /* ??? Isn't value unsigned? */
7240 if ((long) value > 0x1f || (long) value < -0x10)
7241 return bfd_reloc_overflow;
7243 /* ??? Value needs to be properly shifted into place first. */
7244 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7245 bfd_put_16 (input_bfd, value, hit_data);
7246 return bfd_reloc_ok;
7248 case R_ARM_THM_ALU_PREL_11_0:
7249 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7252 bfd_signed_vma relocation;
7254 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7255 | bfd_get_16 (input_bfd, hit_data + 2);
7257 if (globals->use_rel)
7259 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7260 | ((insn & (1 << 26)) >> 15);
7261 if (insn & 0xf00000)
7262 signed_addend = -signed_addend;
7265 relocation = value + signed_addend;
7266 relocation -= (input_section->output_section->vma
7267 + input_section->output_offset
7270 value = abs (relocation);
7272 if (value >= 0x1000)
7273 return bfd_reloc_overflow;
7275 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7276 | ((value & 0x700) << 4)
7277 | ((value & 0x800) << 15);
7281 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7282 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7284 return bfd_reloc_ok;
7288 /* PR 10073: This reloc is not generated by the GNU toolchain,
7289 but it is supported for compatibility with third party libraries
7290 generated by other compilers, specifically the ARM/IAR. */
7293 bfd_signed_vma relocation;
7295 insn = bfd_get_16 (input_bfd, hit_data);
7297 if (globals->use_rel)
7298 addend = (insn & 0x00ff) << 2;
7300 relocation = value + addend;
7301 relocation -= (input_section->output_section->vma
7302 + input_section->output_offset
7305 value = abs (relocation);
7307 /* We do not check for overflow of this reloc. Although strictly
7308 speaking this is incorrect, it appears to be necessary in order
7309 to work with IAR generated relocs. Since GCC and GAS do not
7310 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7311 a problem for them. */
7314 insn = (insn & 0xff00) | (value >> 2);
7316 bfd_put_16 (input_bfd, insn, hit_data);
7318 return bfd_reloc_ok;
7321 case R_ARM_THM_PC12:
7322 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7325 bfd_signed_vma relocation;
7327 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7328 | bfd_get_16 (input_bfd, hit_data + 2);
7330 if (globals->use_rel)
7332 signed_addend = insn & 0xfff;
7333 if (!(insn & (1 << 23)))
7334 signed_addend = -signed_addend;
7337 relocation = value + signed_addend;
7338 relocation -= (input_section->output_section->vma
7339 + input_section->output_offset
7342 value = abs (relocation);
7344 if (value >= 0x1000)
7345 return bfd_reloc_overflow;
7347 insn = (insn & 0xff7ff000) | value;
7348 if (relocation >= 0)
7351 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7352 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7354 return bfd_reloc_ok;
7357 case R_ARM_THM_XPC22:
7358 case R_ARM_THM_CALL:
7359 case R_ARM_THM_JUMP24:
7360 /* Thumb BL (branch long instruction). */
7364 bfd_boolean overflow = FALSE;
7365 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7366 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7367 bfd_signed_vma reloc_signed_max;
7368 bfd_signed_vma reloc_signed_min;
7370 bfd_signed_vma signed_check;
7372 const int thumb2 = using_thumb2 (globals);
7374 /* A branch to an undefined weak symbol is turned into a jump to
7375 the next instruction unless a PLT entry will be created.
7376 The jump to the next instruction is optimized as a NOP.W for
7377 Thumb-2 enabled architectures. */
7378 if (h && h->root.type == bfd_link_hash_undefweak
7379 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7381 if (arch_has_thumb2_nop (globals))
7383 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7384 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7388 bfd_put_16 (input_bfd, 0xe000, hit_data);
7389 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7391 return bfd_reloc_ok;
7394 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7395 with Thumb-1) involving the J1 and J2 bits. */
7396 if (globals->use_rel)
7398 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7399 bfd_vma upper = upper_insn & 0x3ff;
7400 bfd_vma lower = lower_insn & 0x7ff;
7401 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7402 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7403 bfd_vma i1 = j1 ^ s ? 0 : 1;
7404 bfd_vma i2 = j2 ^ s ? 0 : 1;
7406 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7408 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7410 signed_addend = addend;
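/* Worked example (illustrative): the canonical REL contents for a Thumb
   BL are upper_insn 0xf7ff, lower_insn 0xfffe.  Then S = 1, the ten
   "upper" bits are 0x3ff, J1 = J2 = 1, so I1 = I2 = 1 and the raw addend
   is 0xfffffc; the sign-extension step turns this into -4, which cancels
   the 4-byte Thumb PC read-ahead when the branch target equals the
   relocation address.  */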
7413 if (r_type == R_ARM_THM_XPC22)
7415 /* Check for Thumb to Thumb call. */
7416 /* FIXME: Should we translate the instruction into a BL
7417 instruction instead? */
7418 if (sym_flags == STT_ARM_TFUNC)
7419 (*_bfd_error_handler)
7420 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7422 h ? h->root.root.string : "(local)");
7426 /* If it is not a call to Thumb, assume call to Arm.
7427 If it is a call relative to a section name, then it is not a
7428 function call at all, but rather a long jump. Calls through
7429 the PLT do not require stubs. */
7430 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7431 && (h == NULL || splt == NULL
7432 || h->plt.offset == (bfd_vma) -1))
7434 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7436 /* Convert BL to BLX. */
7437 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7439 else if (( r_type != R_ARM_THM_CALL)
7440 && (r_type != R_ARM_THM_JUMP24))
7442 if (elf32_thumb_to_arm_stub
7443 (info, sym_name, input_bfd, output_bfd, input_section,
7444 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7446 return bfd_reloc_ok;
7448 return bfd_reloc_dangerous;
7451 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7452 && r_type == R_ARM_THM_CALL)
7454 /* Make sure this is a BL. */
7455 lower_insn |= 0x1800;
7459 /* Handle calls via the PLT. */
7460 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7462 value = (splt->output_section->vma
7463 + splt->output_offset
7465 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7467 /* If the Thumb BLX instruction is available, convert the
7468 BL to a BLX instruction to call the ARM-mode PLT entry. */
7469 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7470 sym_flags = STT_FUNC;
7474 /* Target the Thumb stub before the ARM PLT entry. */
7475 value -= PLT_THUMB_STUB_SIZE;
7476 sym_flags = STT_ARM_TFUNC;
7478 *unresolved_reloc_p = FALSE;
7481 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7483 /* Check if a stub has to be inserted because the destination
7486 bfd_signed_vma branch_offset;
7487 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7489 from = (input_section->output_section->vma
7490 + input_section->output_offset
7492 branch_offset = (bfd_signed_vma)(value - from);
7495 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7496 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7499 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7500 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7501 || ((sym_flags != STT_ARM_TFUNC)
7502 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7503 || r_type == R_ARM_THM_JUMP24)))
7505 /* The target is out of reach or we are changing modes, so
7506 redirect the branch to the local stub for this
7508 stub_entry = elf32_arm_get_stub_entry (input_section,
7511 if (stub_entry != NULL)
7512 value = (stub_entry->stub_offset
7513 + stub_entry->stub_sec->output_offset
7514 + stub_entry->stub_sec->output_section->vma);
7516 /* If this call becomes a call to Arm, force BLX. */
7517 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7520 && !arm_stub_is_thumb (stub_entry->stub_type))
7521 || (sym_flags != STT_ARM_TFUNC))
7522 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7527 relocation = value + signed_addend;
7529 relocation -= (input_section->output_section->vma
7530 + input_section->output_offset
7533 check = relocation >> howto->rightshift;
7535 /* If this is a signed value, the rightshift just dropped
7536 leading 1 bits (assuming twos complement). */
7537 if ((bfd_signed_vma) relocation >= 0)
7538 signed_check = check;
7540 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7542 /* Calculate the permissible maximum and minimum values for
7543 this relocation according to whether we're relocating for
7545 bitsize = howto->bitsize;
7548 reloc_signed_max = ((1 << (bitsize - 1)) - 1) >> howto->rightshift;
7549 reloc_signed_min = ~reloc_signed_max;
7551 /* Assumes two's complement. */
7552 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7555 if ((lower_insn & 0x5000) == 0x4000)
7556 /* For a BLX instruction, make sure that the relocation is rounded up
7557 to a word boundary. This follows the semantics of the instruction
7558 which specifies that bit 1 of the target address will come from bit
7559 1 of the base address. */
7560 relocation = (relocation + 2) & ~ 3;
7562 /* Put RELOCATION back into the insn. Assumes two's complement.
7563 We use the Thumb-2 encoding, which is safe even if dealing with
7564 a Thumb-1 instruction by virtue of our overflow check above. */
7565 reloc_sign = (signed_check < 0) ? 1 : 0;
7566 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7567 | ((relocation >> 12) & 0x3ff)
7568 | (reloc_sign << 10);
7569 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7570 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7571 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7572 | ((relocation >> 1) & 0x7ff);
7574 /* Put the relocated value back in the object file: */
7575 bfd_put_16 (input_bfd, upper_insn, hit_data);
7576 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7578 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7582 case R_ARM_THM_JUMP19:
7583 /* Thumb32 conditional branch instruction. */
7586 bfd_boolean overflow = FALSE;
7587 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7588 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7589 bfd_signed_vma reloc_signed_max = 0xffffe;
7590 bfd_signed_vma reloc_signed_min = -0x100000;
7591 bfd_signed_vma signed_check;
7593 /* Need to refetch the addend, reconstruct the top three bits,
7594 and squish the two 11 bit pieces together. */
7595 if (globals->use_rel)
7597 bfd_vma S = (upper_insn & 0x0400) >> 10;
7598 bfd_vma upper = (upper_insn & 0x003f);
7599 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7600 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7601 bfd_vma lower = (lower_insn & 0x07ff);
7606 upper -= 0x0100; /* Sign extend. */
7608 addend = (upper << 12) | (lower << 1);
7609 signed_addend = addend;
7612 /* Handle calls via the PLT. */
7613 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7615 value = (splt->output_section->vma
7616 + splt->output_offset
7618 /* Target the Thumb stub before the ARM PLT entry. */
7619 value -= PLT_THUMB_STUB_SIZE;
7620 *unresolved_reloc_p = FALSE;
7623 /* ??? Should handle interworking? GCC might someday try to
7624 use this for tail calls. */
7626 relocation = value + signed_addend;
7627 relocation -= (input_section->output_section->vma
7628 + input_section->output_offset
7630 signed_check = (bfd_signed_vma) relocation;
7632 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7635 /* Put RELOCATION back into the insn. */
7637 bfd_vma S = (relocation & 0x00100000) >> 20;
7638 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7639 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7640 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7641 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7643 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7644 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7647 /* Put the relocated value back in the object file: */
7648 bfd_put_16 (input_bfd, upper_insn, hit_data);
7649 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7651 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7654 case R_ARM_THM_JUMP11:
7655 case R_ARM_THM_JUMP8:
7656 case R_ARM_THM_JUMP6:
7657 /* Thumb B (branch) instruction. */
7659 bfd_signed_vma relocation;
7660 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7661 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7662 bfd_signed_vma signed_check;
7664 /* CBZ cannot jump backward. */
7665 if (r_type == R_ARM_THM_JUMP6)
7666 reloc_signed_min = 0;
7668 if (globals->use_rel)
7670 /* Need to refetch addend. */
7671 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7672 if (addend & ((howto->src_mask + 1) >> 1))
7675 signed_addend &= ~ howto->src_mask;
7676 signed_addend |= addend;
7679 signed_addend = addend;
7680 /* The value in the insn has been right shifted. We need to
7681 undo this, so that we can perform the address calculation
7682 in terms of bytes. */
7683 signed_addend <<= howto->rightshift;
7685 relocation = value + signed_addend;
7687 relocation -= (input_section->output_section->vma
7688 + input_section->output_offset
7691 relocation >>= howto->rightshift;
7692 signed_check = relocation;
7694 if (r_type == R_ARM_THM_JUMP6)
7695 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7697 relocation &= howto->dst_mask;
7698 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7700 bfd_put_16 (input_bfd, relocation, hit_data);
7702 /* Assumes two's complement. */
7703 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7704 return bfd_reloc_overflow;
7706 return bfd_reloc_ok;
7709 case R_ARM_ALU_PCREL7_0:
7710 case R_ARM_ALU_PCREL15_8:
7711 case R_ARM_ALU_PCREL23_15:
7716 insn = bfd_get_32 (input_bfd, hit_data);
7717 if (globals->use_rel)
7719 /* Extract the addend. */
7720 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7721 signed_addend = addend;
7723 relocation = value + signed_addend;
7725 relocation -= (input_section->output_section->vma
7726 + input_section->output_offset
7728 insn = (insn & ~0xfff)
7729 | ((howto->bitpos << 7) & 0xf00)
7730 | ((relocation >> howto->bitpos) & 0xff);
7731 bfd_put_32 (input_bfd, value, hit_data);
7733 return bfd_reloc_ok;
7735 case R_ARM_GNU_VTINHERIT:
7736 case R_ARM_GNU_VTENTRY:
7737 return bfd_reloc_ok;
7739 case R_ARM_GOTOFF32:
7740 /* Relocation is relative to the start of the
7741 global offset table. */
7743 BFD_ASSERT (sgot != NULL);
7745 return bfd_reloc_notsupported;
7747 /* If we are addressing a Thumb function, we need to adjust the
7748 address by one, so that attempts to call the function pointer will
7749 correctly interpret it as Thumb code. */
7750 if (sym_flags == STT_ARM_TFUNC)
7753 /* Note that sgot->output_offset is not involved in this
7754 calculation. We always want the start of .got. If we
7755 define _GLOBAL_OFFSET_TABLE in a different way, as is
7756 permitted by the ABI, we might have to change this
7758 value -= sgot->output_section->vma;
7759 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7760 contents, rel->r_offset, value,
7764 /* Use global offset table as symbol value. */
7765 BFD_ASSERT (sgot != NULL);
7768 return bfd_reloc_notsupported;
7770 *unresolved_reloc_p = FALSE;
7771 value = sgot->output_section->vma;
7772 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7773 contents, rel->r_offset, value,
7777 case R_ARM_GOT_PREL:
7778 /* Relocation is to the entry for this symbol in the
7779 global offset table. */
7781 return bfd_reloc_notsupported;
7788 off = h->got.offset;
7789 BFD_ASSERT (off != (bfd_vma) -1);
7790 dyn = globals->root.dynamic_sections_created;
7792 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7794 && SYMBOL_REFERENCES_LOCAL (info, h))
7795 || (ELF_ST_VISIBILITY (h->other)
7796 && h->root.type == bfd_link_hash_undefweak))
7798 /* This is actually a static link, or it is a -Bsymbolic link
7799 and the symbol is defined locally. We must initialize this
7800 entry in the global offset table. Since the offset must
7801 always be a multiple of 4, we use the least significant bit
7802 to record whether we have initialized it already.
7804 When doing a dynamic link, we create a .rel(a).got relocation
7805 entry to initialize the value. This is done in the
7806 finish_dynamic_symbol routine. */
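/* Illustrative example (not in the original sources): a GOT offset of 12
   is stored as 12 while the entry is still uninitialized and as 13 once
   it has been filled in; because GOT entries are 4-byte aligned, bit 0 is
   always free to act as this "already done" flag, and it is masked off
   again before the offset is used (see the "|= 1" updates below).  */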
7811 /* If we are addressing a Thumb function, we need to
7812 adjust the address by one, so that attempts to
7813 call the function pointer will correctly
7814 interpret it as Thumb code. */
7815 if (sym_flags == STT_ARM_TFUNC)
7818 bfd_put_32 (output_bfd, value, sgot->contents + off);
7823 *unresolved_reloc_p = FALSE;
7825 value = sgot->output_offset + off;
7831 BFD_ASSERT (local_got_offsets != NULL &&
7832 local_got_offsets[r_symndx] != (bfd_vma) -1);
7834 off = local_got_offsets[r_symndx];
7836 /* The offset must always be a multiple of 4. We use the
7837 least significant bit to record whether we have already
7838 generated the necessary reloc. */
7843 /* If we are addressing a Thumb function, we need to
7844 adjust the address by one, so that attempts to
7845 call the function pointer will correctly
7846 interpret it as Thumb code. */
7847 if (sym_flags == STT_ARM_TFUNC)
7850 if (globals->use_rel)
7851 bfd_put_32 (output_bfd, value, sgot->contents + off);
7856 Elf_Internal_Rela outrel;
7859 srelgot = (bfd_get_section_by_name
7860 (dynobj, RELOC_SECTION (globals, ".got")));
7861 BFD_ASSERT (srelgot != NULL);
7863 outrel.r_addend = addend + value;
7864 outrel.r_offset = (sgot->output_section->vma
7865 + sgot->output_offset
7867 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7868 loc = srelgot->contents;
7869 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7870 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7873 local_got_offsets[r_symndx] |= 1;
7876 value = sgot->output_offset + off;
7878 if (r_type != R_ARM_GOT32)
7879 value += sgot->output_section->vma;
7881 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7882 contents, rel->r_offset, value,
7885 case R_ARM_TLS_LDO32:
7886 value = value - dtpoff_base (info);
7888 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7889 contents, rel->r_offset, value,
7892 case R_ARM_TLS_LDM32:
7896 if (globals->sgot == NULL)
7899 off = globals->tls_ldm_got.offset;
7905 /* If we don't know the module number, create a relocation
7909 Elf_Internal_Rela outrel;
7912 if (globals->srelgot == NULL)
7915 outrel.r_addend = 0;
7916 outrel.r_offset = (globals->sgot->output_section->vma
7917 + globals->sgot->output_offset + off);
7918 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7920 if (globals->use_rel)
7921 bfd_put_32 (output_bfd, outrel.r_addend,
7922 globals->sgot->contents + off);
7924 loc = globals->srelgot->contents;
7925 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7926 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7929 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7931 globals->tls_ldm_got.offset |= 1;
7934 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7935 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7937 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7938 contents, rel->r_offset, value,
7942 case R_ARM_TLS_GD32:
7943 case R_ARM_TLS_IE32:
7949 if (globals->sgot == NULL)
7956 dyn = globals->root.dynamic_sections_created;
7957 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7959 || !SYMBOL_REFERENCES_LOCAL (info, h)))
7961 *unresolved_reloc_p = FALSE;
7964 off = h->got.offset;
7965 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
7969 if (local_got_offsets == NULL)
7971 off = local_got_offsets[r_symndx];
7972 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
7975 if (tls_type == GOT_UNKNOWN)
7982 bfd_boolean need_relocs = FALSE;
7983 Elf_Internal_Rela outrel;
7984 bfd_byte *loc = NULL;
7987 /* The GOT entries have not been initialized yet. Do it
7988 now, and emit any relocations. If both an IE GOT and a
7989 GD GOT are necessary, we emit the GD first. */
7991 if ((info->shared || indx != 0)
7993 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7994 || h->root.type != bfd_link_hash_undefweak))
7997 if (globals->srelgot == NULL)
7999 loc = globals->srelgot->contents;
8000 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
8003 if (tls_type & GOT_TLS_GD)
8007 outrel.r_addend = 0;
8008 outrel.r_offset = (globals->sgot->output_section->vma
8009 + globals->sgot->output_offset
8011 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8013 if (globals->use_rel)
8014 bfd_put_32 (output_bfd, outrel.r_addend,
8015 globals->sgot->contents + cur_off);
8017 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8018 globals->srelgot->reloc_count++;
8019 loc += RELOC_SIZE (globals);
8022 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8023 globals->sgot->contents + cur_off + 4);
8026 outrel.r_addend = 0;
8027 outrel.r_info = ELF32_R_INFO (indx,
8028 R_ARM_TLS_DTPOFF32);
8029 outrel.r_offset += 4;
8031 if (globals->use_rel)
8032 bfd_put_32 (output_bfd, outrel.r_addend,
8033 globals->sgot->contents + cur_off + 4);
8036 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8037 globals->srelgot->reloc_count++;
8038 loc += RELOC_SIZE (globals);
8043 /* If we are not emitting relocations for a
8044 general dynamic reference, then we must be in a
8045 static link or an executable link with the
8046 symbol binding locally. Mark it as belonging
8047 to module 1, the executable. */
8048 bfd_put_32 (output_bfd, 1,
8049 globals->sgot->contents + cur_off);
8050 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8051 globals->sgot->contents + cur_off + 4);
8057 if (tls_type & GOT_TLS_IE)
8062 outrel.r_addend = value - dtpoff_base (info);
8064 outrel.r_addend = 0;
8065 outrel.r_offset = (globals->sgot->output_section->vma
8066 + globals->sgot->output_offset
8068 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8070 if (globals->use_rel)
8071 bfd_put_32 (output_bfd, outrel.r_addend,
8072 globals->sgot->contents + cur_off);
8074 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8075 globals->srelgot->reloc_count++;
8076 loc += RELOC_SIZE (globals);
8079 bfd_put_32 (output_bfd, tpoff (info, value),
8080 globals->sgot->contents + cur_off);
8087 local_got_offsets[r_symndx] |= 1;
8090 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8092 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8093 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8095 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8096 contents, rel->r_offset, value,
8100 case R_ARM_TLS_LE32:
8103 (*_bfd_error_handler)
8104 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8105 input_bfd, input_section,
8106 (long) rel->r_offset, howto->name);
8107 return (bfd_reloc_status_type) FALSE;
8110 value = tpoff (info, value);
8112 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8113 contents, rel->r_offset, value,
8117 if (globals->fix_v4bx)
8119 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8121 /* Ensure that we have a BX instruction. */
8122 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8124 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8126 /* Branch to veneer. */
8128 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8129 glue_addr -= input_section->output_section->vma
8130 + input_section->output_offset
8131 + rel->r_offset + 8;
8132 insn = (insn & 0xf0000000) | 0x0a000000
8133 | ((glue_addr >> 2) & 0x00ffffff);
8137 /* Preserve Rm (lowest four bits) and the condition code
8138 (highest four bits). Other bits encode MOV PC,Rm. */
8139 insn = (insn & 0xf000000f) | 0x01a0f000;
8142 bfd_put_32 (input_bfd, insn, hit_data);
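/* Example (illustrative): when fix_v4bx is 1, "bx r3" (0xe12fff13) keeps
   its condition code and Rm (0xe0000003) and is rewritten as 0xe1a0f003,
   i.e. "mov pc, r3".  When fix_v4bx is 2 and Rm is not pc, the
   instruction is instead replaced by a branch to the per-register veneer
   returned by elf32_arm_bx_glue above.  */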
8144 return bfd_reloc_ok;
8146 case R_ARM_MOVW_ABS_NC:
8147 case R_ARM_MOVT_ABS:
8148 case R_ARM_MOVW_PREL_NC:
8149 case R_ARM_MOVT_PREL:
8150 /* Until we properly support segment-base-relative addressing, we assume
8151 the segment base to be zero, as for the group relocations.
8152 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8153 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8154 case R_ARM_MOVW_BREL_NC:
8155 case R_ARM_MOVW_BREL:
8156 case R_ARM_MOVT_BREL:
8158 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8160 if (globals->use_rel)
8162 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8163 signed_addend = (addend ^ 0x8000) - 0x8000;
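/* Worked example (illustrative): MOVW splits its 16-bit immediate into
   imm4 (bits 16-19) and imm12 (bits 0-11), so for 0xe3080001
   ("movw r0, #0x8001") the extraction above yields addend 0x8001, and the
   XOR/subtract idiom sign-extends it to -0x7fff.  An addend below 0x8000,
   e.g. 0x1234, is left unchanged by the same idiom.  */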
8166 value += signed_addend;
8168 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8169 value -= (input_section->output_section->vma
8170 + input_section->output_offset + rel->r_offset);
8172 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8173 return bfd_reloc_overflow;
8175 if (sym_flags == STT_ARM_TFUNC)
8178 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8179 || r_type == R_ARM_MOVT_BREL)
8183 insn |= value & 0xfff;
8184 insn |= (value & 0xf000) << 4;
8185 bfd_put_32 (input_bfd, insn, hit_data);
8187 return bfd_reloc_ok;
8189 case R_ARM_THM_MOVW_ABS_NC:
8190 case R_ARM_THM_MOVT_ABS:
8191 case R_ARM_THM_MOVW_PREL_NC:
8192 case R_ARM_THM_MOVT_PREL:
8193 /* Until we properly support segment-base-relative addressing, we assume
8194 the segment base to be zero, as for the above relocations.
8195 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8196 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8197 as R_ARM_THM_MOVT_ABS. */
8198 case R_ARM_THM_MOVW_BREL_NC:
8199 case R_ARM_THM_MOVW_BREL:
8200 case R_ARM_THM_MOVT_BREL:
8204 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8205 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8207 if (globals->use_rel)
8209 addend = ((insn >> 4) & 0xf000)
8210 | ((insn >> 15) & 0x0800)
8211 | ((insn >> 4) & 0x0700)
8213 signed_addend = (addend ^ 0x8000) - 0x8000;
8216 value += signed_addend;
8218 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8219 value -= (input_section->output_section->vma
8220 + input_section->output_offset + rel->r_offset);
8222 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8223 return bfd_reloc_overflow;
8225 if (sym_flags == STT_ARM_TFUNC)
8228 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8229 || r_type == R_ARM_THM_MOVT_BREL)
8233 insn |= (value & 0xf000) << 4;
8234 insn |= (value & 0x0800) << 15;
8235 insn |= (value & 0x0700) << 4;
8236 insn |= (value & 0x00ff);
8238 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8239 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8241 return bfd_reloc_ok;
8243 case R_ARM_ALU_PC_G0_NC:
8244 case R_ARM_ALU_PC_G1_NC:
8245 case R_ARM_ALU_PC_G0:
8246 case R_ARM_ALU_PC_G1:
8247 case R_ARM_ALU_PC_G2:
8248 case R_ARM_ALU_SB_G0_NC:
8249 case R_ARM_ALU_SB_G1_NC:
8250 case R_ARM_ALU_SB_G0:
8251 case R_ARM_ALU_SB_G1:
8252 case R_ARM_ALU_SB_G2:
8254 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8255 bfd_vma pc = input_section->output_section->vma
8256 + input_section->output_offset + rel->r_offset;
8257 /* sb should be the origin of the *segment* containing the symbol.
8258 It is not clear how to obtain this OS-dependent value, so we
8259 make an arbitrary choice of zero. */
8263 bfd_signed_vma signed_value;
8266 /* Determine which group of bits to select. */
8269 case R_ARM_ALU_PC_G0_NC:
8270 case R_ARM_ALU_PC_G0:
8271 case R_ARM_ALU_SB_G0_NC:
8272 case R_ARM_ALU_SB_G0:
8276 case R_ARM_ALU_PC_G1_NC:
8277 case R_ARM_ALU_PC_G1:
8278 case R_ARM_ALU_SB_G1_NC:
8279 case R_ARM_ALU_SB_G1:
8283 case R_ARM_ALU_PC_G2:
8284 case R_ARM_ALU_SB_G2:
8292 /* If REL, extract the addend from the insn. If RELA, it will
8293 have already been fetched for us. */
8294 if (globals->use_rel)
8297 bfd_vma constant = insn & 0xff;
8298 bfd_vma rotation = (insn & 0xf00) >> 8;
8301 signed_addend = constant;
8304 /* Compensate for the fact that in the instruction, the
8305 rotation is stored in multiples of 2 bits. */
8308 /* Rotate "constant" right by "rotation" bits. */
8309 signed_addend = (constant >> rotation) |
8310 (constant << (8 * sizeof (bfd_vma) - rotation));
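/* Illustrative example (not in the original sources): an ARM immediate
   operand is an 8-bit constant rotated right by twice the 4-bit rotation
   field.  A constant field of 0x48 with a rotation field of 0xb therefore
   stands for 0x48 rotated right by 22 bits, i.e. 0x12000 -- the same
   quantity produced in the G_0 example given for
   calculate_group_reloc_mask above.  */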
8313 /* Determine if the instruction is an ADD or a SUB.
8314 (For REL, this determines the sign of the addend.) */
8315 negative = identify_add_or_sub (insn);
8318 (*_bfd_error_handler)
8319 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8320 input_bfd, input_section,
8321 (long) rel->r_offset, howto->name);
8322 return bfd_reloc_overflow;
8325 signed_addend *= negative;
8328 /* Compute the value (X) to go in the place. */
8329 if (r_type == R_ARM_ALU_PC_G0_NC
8330 || r_type == R_ARM_ALU_PC_G1_NC
8331 || r_type == R_ARM_ALU_PC_G0
8332 || r_type == R_ARM_ALU_PC_G1
8333 || r_type == R_ARM_ALU_PC_G2)
8335 signed_value = value - pc + signed_addend;
8337 /* Section base relative. */
8338 signed_value = value - sb + signed_addend;
8340 /* If the target symbol is a Thumb function, then set the
8341 Thumb bit in the address. */
8342 if (sym_flags == STT_ARM_TFUNC)
8345 /* Calculate the value of the relevant G_n, in encoded
8346 constant-with-rotation format. */
8347 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8350 /* Check for overflow if required. */
8351 if ((r_type == R_ARM_ALU_PC_G0
8352 || r_type == R_ARM_ALU_PC_G1
8353 || r_type == R_ARM_ALU_PC_G2
8354 || r_type == R_ARM_ALU_SB_G0
8355 || r_type == R_ARM_ALU_SB_G1
8356 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8358 (*_bfd_error_handler)
8359 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8360 input_bfd, input_section,
8361 (long) rel->r_offset, abs (signed_value), howto->name);
8362 return bfd_reloc_overflow;
8365 /* Mask out the value and the ADD/SUB part of the opcode; take care
8366 not to destroy the S bit. */
8369 /* Set the opcode according to whether the value to go in the
8370 place is negative. */
8371 if (signed_value < 0)
8376 /* Encode the offset. */
8379 bfd_put_32 (input_bfd, insn, hit_data);
8381 return bfd_reloc_ok;
8383 case R_ARM_LDR_PC_G0:
8384 case R_ARM_LDR_PC_G1:
8385 case R_ARM_LDR_PC_G2:
8386 case R_ARM_LDR_SB_G0:
8387 case R_ARM_LDR_SB_G1:
8388 case R_ARM_LDR_SB_G2:
8390 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8391 bfd_vma pc = input_section->output_section->vma
8392 + input_section->output_offset + rel->r_offset;
8393 bfd_vma sb = 0; /* See note above. */
8395 bfd_signed_vma signed_value;
8398 /* Determine which groups of bits to calculate. */
8401 case R_ARM_LDR_PC_G0:
8402 case R_ARM_LDR_SB_G0:
8406 case R_ARM_LDR_PC_G1:
8407 case R_ARM_LDR_SB_G1:
8411 case R_ARM_LDR_PC_G2:
8412 case R_ARM_LDR_SB_G2:
8420 /* If REL, extract the addend from the insn. If RELA, it will
8421 have already been fetched for us. */
8422 if (globals->use_rel)
8424 int negative = (insn & (1 << 23)) ? 1 : -1;
8425 signed_addend = negative * (insn & 0xfff);
8428 /* Compute the value (X) to go in the place. */
8429 if (r_type == R_ARM_LDR_PC_G0
8430 || r_type == R_ARM_LDR_PC_G1
8431 || r_type == R_ARM_LDR_PC_G2)
8433 signed_value = value - pc + signed_addend;
8435 /* Section base relative. */
8436 signed_value = value - sb + signed_addend;
8438 /* Calculate the value of the relevant G_{n-1} to obtain
8439 the residual at that stage. */
8440 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8442 /* Check for overflow. */
8443 if (residual >= 0x1000)
8445 (*_bfd_error_handler)
8446 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8447 input_bfd, input_section,
8448 (long) rel->r_offset, abs (signed_value), howto->name);
8449 return bfd_reloc_overflow;
8452 /* Mask out the value and U bit. */
8455 /* Set the U bit if the value to go in the place is non-negative. */
8456 if (signed_value >= 0)
8459 /* Encode the offset. */
8462 bfd_put_32 (input_bfd, insn, hit_data);
8464 return bfd_reloc_ok;
8466 case R_ARM_LDRS_PC_G0:
8467 case R_ARM_LDRS_PC_G1:
8468 case R_ARM_LDRS_PC_G2:
8469 case R_ARM_LDRS_SB_G0:
8470 case R_ARM_LDRS_SB_G1:
8471 case R_ARM_LDRS_SB_G2:
8473 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8474 bfd_vma pc = input_section->output_section->vma
8475 + input_section->output_offset + rel->r_offset;
8476 bfd_vma sb = 0; /* See note above. */
8478 bfd_signed_vma signed_value;
8481 /* Determine which groups of bits to calculate. */
8484 case R_ARM_LDRS_PC_G0:
8485 case R_ARM_LDRS_SB_G0:
8489 case R_ARM_LDRS_PC_G1:
8490 case R_ARM_LDRS_SB_G1:
8494 case R_ARM_LDRS_PC_G2:
8495 case R_ARM_LDRS_SB_G2:
8503 /* If REL, extract the addend from the insn. If RELA, it will
8504 have already been fetched for us. */
8505 if (globals->use_rel)
8507 int negative = (insn & (1 << 23)) ? 1 : -1;
8508 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8511 /* Compute the value (X) to go in the place. */
8512 if (r_type == R_ARM_LDRS_PC_G0
8513 || r_type == R_ARM_LDRS_PC_G1
8514 || r_type == R_ARM_LDRS_PC_G2)
8516 signed_value = value - pc + signed_addend;
8518 /* Section base relative. */
8519 signed_value = value - sb + signed_addend;
8521 /* Calculate the value of the relevant G_{n-1} to obtain
8522 the residual at that stage. */
8523 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8525 /* Check for overflow. */
8526 if (residual >= 0x100)
8528 (*_bfd_error_handler)
8529 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8530 input_bfd, input_section,
8531 (long) rel->r_offset, abs (signed_value), howto->name);
8532 return bfd_reloc_overflow;
8535 /* Mask out the value and U bit. */
8538 /* Set the U bit if the value to go in the place is non-negative. */
8539 if (signed_value >= 0)
8542 /* Encode the offset. */
8543 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8545 bfd_put_32 (input_bfd, insn, hit_data);
8547 return bfd_reloc_ok;
8549 case R_ARM_LDC_PC_G0:
8550 case R_ARM_LDC_PC_G1:
8551 case R_ARM_LDC_PC_G2:
8552 case R_ARM_LDC_SB_G0:
8553 case R_ARM_LDC_SB_G1:
8554 case R_ARM_LDC_SB_G2:
8556 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8557 bfd_vma pc = input_section->output_section->vma
8558 + input_section->output_offset + rel->r_offset;
8559 bfd_vma sb = 0; /* See note above. */
8561 bfd_signed_vma signed_value;
8564 /* Determine which groups of bits to calculate. */
8567 case R_ARM_LDC_PC_G0:
8568 case R_ARM_LDC_SB_G0:
8572 case R_ARM_LDC_PC_G1:
8573 case R_ARM_LDC_SB_G1:
8577 case R_ARM_LDC_PC_G2:
8578 case R_ARM_LDC_SB_G2:
8586 /* If REL, extract the addend from the insn. If RELA, it will
8587 have already been fetched for us. */
8588 if (globals->use_rel)
8590 int negative = (insn & (1 << 23)) ? 1 : -1;
8591 signed_addend = negative * ((insn & 0xff) << 2);
8594 /* Compute the value (X) to go in the place. */
8595 if (r_type == R_ARM_LDC_PC_G0
8596 || r_type == R_ARM_LDC_PC_G1
8597 || r_type == R_ARM_LDC_PC_G2)
8599 signed_value = value - pc + signed_addend;
8601 /* Section base relative. */
8602 signed_value = value - sb + signed_addend;
8604 /* Calculate the value of the relevant G_{n-1} to obtain
8605 the residual at that stage. */
8606 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8608 /* Check for overflow. (The absolute value to go in the place must be
8609 divisible by four and, after having been divided by four, must
8610 fit in eight bits.) */
8611 if ((residual & 0x3) != 0 || residual >= 0x400)
8613 (*_bfd_error_handler)
8614 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8615 input_bfd, input_section,
8616 (long) rel->r_offset, abs (signed_value), howto->name);
8617 return bfd_reloc_overflow;
8620 /* Mask out the value and U bit. */
8623 /* Set the U bit if the value to go in the place is non-negative. */
8624 if (signed_value >= 0)
8627 /* Encode the offset. */
8628 insn |= residual >> 2;
8630 bfd_put_32 (input_bfd, insn, hit_data);
8632 return bfd_reloc_ok;
8635 return bfd_reloc_notsupported;
8639 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8641 arm_add_to_rel (bfd * abfd,
8643 reloc_howto_type * howto,
8644 bfd_signed_vma increment)
8646 bfd_signed_vma addend;
8648 if (howto->type == R_ARM_THM_CALL
8649 || howto->type == R_ARM_THM_JUMP24)
8651 int upper_insn, lower_insn;
8654 upper_insn = bfd_get_16 (abfd, address);
8655 lower_insn = bfd_get_16 (abfd, address + 2);
8656 upper = upper_insn & 0x7ff;
8657 lower = lower_insn & 0x7ff;
8659 addend = (upper << 12) | (lower << 1);
8660 addend += increment;
8663 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8664 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8666 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8667 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
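/* Worked example (illustrative): the pre-Thumb-2 BL pair stores the
   branch offset in halfword units, with the high 11 bits in the first
   instruction and the low 11 bits in the second.  An existing byte addend
   of 0x1000 plus an INCREMENT of 0x20 gives 0x1020; once converted back
   to halfword units (0x810) it is written back as 1 in the upper field
   and 0x10 in the lower field, which reassembles to 0x1020 bytes.  */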
8673 contents = bfd_get_32 (abfd, address);
8675 /* Get the (signed) value from the instruction. */
8676 addend = contents & howto->src_mask;
8677 if (addend & ((howto->src_mask + 1) >> 1))
8679 bfd_signed_vma mask;
8682 mask &= ~ howto->src_mask;
8686 /* Add in the increment (which is a byte value). */
8687 switch (howto->type)
8690 addend += increment;
8697 addend <<= howto->size;
8698 addend += increment;
8700 /* Should we check for overflow here ? */
8702 /* Drop any undesired bits. */
8703 addend >>= howto->rightshift;
8707 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8709 bfd_put_32 (abfd, contents, address);
8713 #define IS_ARM_TLS_RELOC(R_TYPE) \
8714 ((R_TYPE) == R_ARM_TLS_GD32 \
8715 || (R_TYPE) == R_ARM_TLS_LDO32 \
8716 || (R_TYPE) == R_ARM_TLS_LDM32 \
8717 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8718 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8719 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8720 || (R_TYPE) == R_ARM_TLS_LE32 \
8721 || (R_TYPE) == R_ARM_TLS_IE32)
8723 /* Relocate an ARM ELF section. */
8726 elf32_arm_relocate_section (bfd * output_bfd,
8727 struct bfd_link_info * info,
8729 asection * input_section,
8730 bfd_byte * contents,
8731 Elf_Internal_Rela * relocs,
8732 Elf_Internal_Sym * local_syms,
8733 asection ** local_sections)
8735 Elf_Internal_Shdr *symtab_hdr;
8736 struct elf_link_hash_entry **sym_hashes;
8737 Elf_Internal_Rela *rel;
8738 Elf_Internal_Rela *relend;
8740 struct elf32_arm_link_hash_table * globals;
8742 globals = elf32_arm_hash_table (info);
8744 symtab_hdr = & elf_symtab_hdr (input_bfd);
8745 sym_hashes = elf_sym_hashes (input_bfd);
8748 relend = relocs + input_section->reloc_count;
8749 for (; rel < relend; rel++)
8752 reloc_howto_type * howto;
8753 unsigned long r_symndx;
8754 Elf_Internal_Sym * sym;
8756 struct elf_link_hash_entry * h;
8758 bfd_reloc_status_type r;
8761 bfd_boolean unresolved_reloc = FALSE;
8762 char *error_message = NULL;
8764 r_symndx = ELF32_R_SYM (rel->r_info);
8765 r_type = ELF32_R_TYPE (rel->r_info);
8766 r_type = arm_real_reloc_type (globals, r_type);
8768 if ( r_type == R_ARM_GNU_VTENTRY
8769 || r_type == R_ARM_GNU_VTINHERIT)
8772 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8773 howto = bfd_reloc.howto;
8779 if (r_symndx < symtab_hdr->sh_info)
8781 sym = local_syms + r_symndx;
8782 sym_type = ELF32_ST_TYPE (sym->st_info);
8783 sec = local_sections[r_symndx];
8785 /* An object file might have a reference to a local
8786 undefined symbol. This is a daft object file, but we
8787 should at least do something about it. V4BX & NONE
8788 relocations do not use the symbol and are explicitly
8789 allowed to use the undefined symbol, so allow those. */
8790 if (r_type != R_ARM_V4BX
8791 && r_type != R_ARM_NONE
8792 && bfd_is_und_section (sec)
8793 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8795 if (!info->callbacks->undefined_symbol
8796 (info, bfd_elf_string_from_elf_section
8797 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8798 input_bfd, input_section,
8799 rel->r_offset, TRUE))
8803 if (globals->use_rel)
8805 relocation = (sec->output_section->vma
8806 + sec->output_offset
8808 if (!info->relocatable
8809 && (sec->flags & SEC_MERGE)
8810 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8813 bfd_vma addend, value;
8817 case R_ARM_MOVW_ABS_NC:
8818 case R_ARM_MOVT_ABS:
8819 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8820 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8821 addend = (addend ^ 0x8000) - 0x8000;
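	      /* Note (added for clarity): the XOR/subtract pair above
		 sign-extends a 16-bit immediate, e.g. 0xffff becomes
		 (0xffff ^ 0x8000) - 0x8000 == -1, while 0x7fff is left
		 unchanged.  */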
8824 case R_ARM_THM_MOVW_ABS_NC:
8825 case R_ARM_THM_MOVT_ABS:
8826 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8828 value |= bfd_get_16 (input_bfd,
8829 contents + rel->r_offset + 2);
8830 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8831 | ((value & 0x04000000) >> 15);
8832 addend = (addend ^ 0x8000) - 0x8000;
8836 if (howto->rightshift
8837 || (howto->src_mask & (howto->src_mask + 1)))
8839 (*_bfd_error_handler)
8840 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8841 input_bfd, input_section,
8842 (long) rel->r_offset, howto->name);
8846 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8848 /* Get the (signed) value from the instruction. */
8849 addend = value & howto->src_mask;
8850 if (addend & ((howto->src_mask + 1) >> 1))
8852 bfd_signed_vma mask;
8855 mask &= ~ howto->src_mask;
8863 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8865 addend += msec->output_section->vma + msec->output_offset;
8867 /* Cases here must match those in the preceding
8868 switch statement. */
8871 case R_ARM_MOVW_ABS_NC:
8872 case R_ARM_MOVT_ABS:
8873 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8875 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8878 case R_ARM_THM_MOVW_ABS_NC:
8879 case R_ARM_THM_MOVT_ABS:
8880 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8881 | (addend & 0xff) | ((addend & 0x0800) << 15);
8882 bfd_put_16 (input_bfd, value >> 16,
8883 contents + rel->r_offset);
8884 bfd_put_16 (input_bfd, value,
8885 contents + rel->r_offset + 2);
8889 value = (value & ~ howto->dst_mask)
8890 | (addend & howto->dst_mask);
8891 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8897 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8903 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8904 r_symndx, symtab_hdr, sym_hashes,
8906 unresolved_reloc, warned);
8911 if (sec != NULL && elf_discarded_section (sec))
8913 /* For relocs against symbols from removed linkonce sections,
8914 or sections discarded by a linker script, we just want the
8915 section contents zeroed. Avoid any special processing. */
8916 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8922 if (info->relocatable)
8924 /* This is a relocatable link. We don't have to change
8925 anything, unless the reloc is against a section symbol,
8926 in which case we have to adjust according to where the
8927 section symbol winds up in the output section. */
8928 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8930 if (globals->use_rel)
8931 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8932 howto, (bfd_signed_vma) sec->output_offset);
8934 rel->r_addend += sec->output_offset;
8940 name = h->root.root.string;
8943 name = (bfd_elf_string_from_elf_section
8944 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8945 if (name == NULL || *name == '\0')
8946 name = bfd_section_name (input_bfd, sec);
8950 && r_type != R_ARM_NONE
8952 || h->root.type == bfd_link_hash_defined
8953 || h->root.type == bfd_link_hash_defweak)
8954 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
8956 (*_bfd_error_handler)
8957 ((sym_type == STT_TLS
8958 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
8959 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
8962 (long) rel->r_offset,
8967 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
8968 input_section, contents, rel,
8969 relocation, info, sec, name,
8970 (h ? ELF_ST_TYPE (h->type) :
8971 ELF_ST_TYPE (sym->st_info)), h,
8972 &unresolved_reloc, &error_message);
8974 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
8975 because such sections are not SEC_ALLOC and thus ld.so will
8976 not process them. */
8977 if (unresolved_reloc
8978 && !((input_section->flags & SEC_DEBUGGING) != 0
8981 (*_bfd_error_handler)
8982 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
8985 (long) rel->r_offset,
8987 h->root.root.string);
8991 if (r != bfd_reloc_ok)
8995 case bfd_reloc_overflow:
8996 /* If the overflowing reloc was to an undefined symbol,
8997 we have already printed one error message and there
8998 is no point complaining again. */
9000 h->root.type != bfd_link_hash_undefined)
9001 && (!((*info->callbacks->reloc_overflow)
9002 (info, (h ? &h->root : NULL), name, howto->name,
9003 (bfd_vma) 0, input_bfd, input_section,
9008 case bfd_reloc_undefined:
9009 if (!((*info->callbacks->undefined_symbol)
9010 (info, name, input_bfd, input_section,
9011 rel->r_offset, TRUE)))
9015 case bfd_reloc_outofrange:
9016 error_message = _("out of range");
9019 case bfd_reloc_notsupported:
9020 error_message = _("unsupported relocation");
9023 case bfd_reloc_dangerous:
9024 /* error_message should already be set. */
9028 error_message = _("unknown error");
9032 BFD_ASSERT (error_message != NULL);
9033 if (!((*info->callbacks->reloc_dangerous)
9034 (info, error_message, input_bfd, input_section,
9045 /* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
9046 adds the edit to the start of the list. (The list must be built in order of
9047 ascending INDEX: the function's callers are primarily responsible for
9048 maintaining that condition). */
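/* A minimal usage sketch (illustration only; the variable and index names
   are hypothetical, but the calls mirror those made elsewhere in this
   file):

     arm_unwind_table_edit *head = NULL, *tail = NULL;
     add_unwind_table_edit (&head, &tail, DELETE_EXIDX_ENTRY, NULL,
			    entry_index);
     add_unwind_table_edit (&head, &tail, INSERT_EXIDX_CANTUNWIND_AT_END,
			    text_sec, UINT_MAX);

   Edits are added in ascending index order, with the CANTUNWIND terminator
   (index UINT_MAX) last.  */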
9051 add_unwind_table_edit (arm_unwind_table_edit **head,
9052 arm_unwind_table_edit **tail,
9053 arm_unwind_edit_type type,
9054 asection *linked_section,
9057 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9058 xmalloc (sizeof (arm_unwind_table_edit));
9060 new_edit->type = type;
9061 new_edit->linked_section = linked_section;
9062 new_edit->index = index;
9066 new_edit->next = NULL;
9069 (*tail)->next = new_edit;
9078 new_edit->next = *head;
9087 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9089 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
9091 adjust_exidx_size (asection *exidx_sec, int adjust)
9095 if (!exidx_sec->rawsize)
9096 exidx_sec->rawsize = exidx_sec->size;
9098 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9099 out_sec = exidx_sec->output_section;
9100 /* Adjust size of output section. */
9101 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9104 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9106 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
9108 struct _arm_elf_section_data *exidx_arm_data;
9110 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9111 add_unwind_table_edit (
9112 &exidx_arm_data->u.exidx.unwind_edit_list,
9113 &exidx_arm_data->u.exidx.unwind_edit_tail,
9114 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9116 adjust_exidx_size (exidx_sec, 8);
9119 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9120 made to those tables, such that:
9122 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9123 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9124 codes which have been inlined into the index).
9126 The edits are applied when the tables are written
9127 (in elf32_arm_write_section).
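   For example (an illustrative scenario, not taken from the sources): if
   two consecutive text sections both end up covered by EXIDX_CANTUNWIND
   entries, the second entry is deleted, and a text section with no unwind
   data at all has an EXIDX_CANTUNWIND entry inserted after the previous
   table unless one is already in effect.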
9131 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9132 unsigned int num_text_sections,
9133 struct bfd_link_info *info)
9136 unsigned int last_second_word = 0, i;
9137 asection *last_exidx_sec = NULL;
9138 asection *last_text_sec = NULL;
9139 int last_unwind_type = -1;
9141 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9143 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9147 for (sec = inp->sections; sec != NULL; sec = sec->next)
9149 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9150 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9152 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9155 if (elf_sec->linked_to)
9157 Elf_Internal_Shdr *linked_hdr
9158 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9159 struct _arm_elf_section_data *linked_sec_arm_data
9160 = get_arm_elf_section_data (linked_hdr->bfd_section);
9162 if (linked_sec_arm_data == NULL)
9165 /* Link this .ARM.exidx section back from the text section it
9167 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9172 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
9173 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9174 and add EXIDX_CANTUNWIND entries for sections with no unwind table data.
9177 for (i = 0; i < num_text_sections; i++)
9179 asection *sec = text_section_order[i];
9180 asection *exidx_sec;
9181 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9182 struct _arm_elf_section_data *exidx_arm_data;
9183 bfd_byte *contents = NULL;
9184 int deleted_exidx_bytes = 0;
9186 arm_unwind_table_edit *unwind_edit_head = NULL;
9187 arm_unwind_table_edit *unwind_edit_tail = NULL;
9188 Elf_Internal_Shdr *hdr;
9191 if (arm_data == NULL)
9194 exidx_sec = arm_data->u.text.arm_exidx_sec;
9195 if (exidx_sec == NULL)
9197 /* Section has no unwind data. */
9198 if (last_unwind_type == 0 || !last_exidx_sec)
9201 /* Ignore zero sized sections. */
9205 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9206 last_unwind_type = 0;
9210 /* Skip /DISCARD/ sections. */
9211 if (bfd_is_abs_section (exidx_sec->output_section))
9214 hdr = &elf_section_data (exidx_sec)->this_hdr;
9215 if (hdr->sh_type != SHT_ARM_EXIDX)
9218 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9219 if (exidx_arm_data == NULL)
9222 ibfd = exidx_sec->owner;
9224 if (hdr->contents != NULL)
9225 contents = hdr->contents;
9226 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9230 for (j = 0; j < hdr->sh_size; j += 8)
9232 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9236 /* An EXIDX_CANTUNWIND entry. */
9237 if (second_word == 1)
9239 if (last_unwind_type == 0)
9243 /* Inlined unwinding data. Merge if equal to previous. */
9244 else if ((second_word & 0x80000000) != 0)
9246 if (last_second_word == second_word && last_unwind_type == 1)
9249 last_second_word = second_word;
9251 /* Normal table entry. In theory we could merge these too,
9252 but duplicate entries are likely to be much less common. */
9258 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9259 DELETE_EXIDX_ENTRY, NULL, j / 8);
9261 deleted_exidx_bytes += 8;
9264 last_unwind_type = unwind_type;
9267 /* Free contents if we allocated it ourselves. */
9268 if (contents != hdr->contents)
9271 /* Record edits to be applied later (in elf32_arm_write_section). */
9272 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9273 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9275 if (deleted_exidx_bytes > 0)
9276 adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
9278 last_exidx_sec = exidx_sec;
9279 last_text_sec = sec;
9282 /* Add terminating CANTUNWIND entry. */
9283 if (last_exidx_sec && last_unwind_type != 0)
9284 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9290 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9291 bfd *ibfd, const char *name)
9293 asection *sec, *osec;
9295 sec = bfd_get_section_by_name (ibfd, name);
9296 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9299 osec = sec->output_section;
9300 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9303 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9304 sec->output_offset, sec->size))
9311 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9313 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9315 /* Invoke the regular ELF backend linker to do all the work. */
9316 if (!bfd_elf_final_link (abfd, info))
9319 /* Write out any glue sections now that we have created all the
9321 if (globals->bfd_of_glue_owner != NULL)
9323 if (! elf32_arm_output_glue_section (info, abfd,
9324 globals->bfd_of_glue_owner,
9325 ARM2THUMB_GLUE_SECTION_NAME))
9328 if (! elf32_arm_output_glue_section (info, abfd,
9329 globals->bfd_of_glue_owner,
9330 THUMB2ARM_GLUE_SECTION_NAME))
9333 if (! elf32_arm_output_glue_section (info, abfd,
9334 globals->bfd_of_glue_owner,
9335 VFP11_ERRATUM_VENEER_SECTION_NAME))
9338 if (! elf32_arm_output_glue_section (info, abfd,
9339 globals->bfd_of_glue_owner,
9340 ARM_BX_GLUE_SECTION_NAME))
9347 /* Set the right machine number. */
9350 elf32_arm_object_p (bfd *abfd)
9354 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9356 if (mach != bfd_mach_arm_unknown)
9357 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9359 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9360 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9363 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9368 /* Function to keep ARM specific flags in the ELF header. */
9371 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9373 if (elf_flags_init (abfd)
9374 && elf_elfheader (abfd)->e_flags != flags)
9376 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9378 if (flags & EF_ARM_INTERWORK)
9379 (*_bfd_error_handler)
9380 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9384 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9390 elf_elfheader (abfd)->e_flags = flags;
9391 elf_flags_init (abfd) = TRUE;
9397 /* Copy backend specific data from one object module to another. */
9400 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9405 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9408 in_flags = elf_elfheader (ibfd)->e_flags;
9409 out_flags = elf_elfheader (obfd)->e_flags;
9411 if (elf_flags_init (obfd)
9412 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9413 && in_flags != out_flags)
9415 /* Cannot mix APCS26 and APCS32 code. */
9416 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9419 /* Cannot mix float APCS and non-float APCS code. */
9420 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9423 /* If the src and dest have different interworking flags
9424 then turn off the interworking bit. */
9425 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9427 if (out_flags & EF_ARM_INTERWORK)
9429 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9432 in_flags &= ~EF_ARM_INTERWORK;
9435 /* Likewise for PIC, though don't warn for this case. */
9436 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9437 in_flags &= ~EF_ARM_PIC;
9440 elf_elfheader (obfd)->e_flags = in_flags;
9441 elf_flags_init (obfd) = TRUE;
9443 /* Also copy the EI_OSABI field. */
9444 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9445 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9447 /* Copy object attributes. */
9448 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9453 /* Values for Tag_ABI_PCS_R9_use. */
9462 /* Values for Tag_ABI_PCS_RW_data. */
9465 AEABI_PCS_RW_data_absolute,
9466 AEABI_PCS_RW_data_PCrel,
9467 AEABI_PCS_RW_data_SBrel,
9468 AEABI_PCS_RW_data_unused
9471 /* Values for Tag_ABI_enum_size. */
9477 AEABI_enum_forced_wide
9480 /* Determine whether an object attribute tag takes an integer, a
9484 elf32_arm_obj_attrs_arg_type (int tag)
9486 if (tag == Tag_compatibility)
9487 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9488 else if (tag == Tag_nodefaults)
9489 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9490 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9491 return ATTR_TYPE_FLAG_STR_VAL;
9493 return ATTR_TYPE_FLAG_INT_VAL;
9495 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9498 /* The ABI defines that Tag_conformance should be emitted first, and that
9499 Tag_nodefaults should be second (if either is defined). This sets those
9500 two positions, and bumps up the position of all the remaining tags to
9503 elf32_arm_obj_attrs_order (int num)
9506 return Tag_conformance;
9508 return Tag_nodefaults;
9509 if ((num - 2) < Tag_nodefaults)
9511 if ((num - 1) < Tag_conformance)
9516 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9517 Returns -1 if no architecture could be read. */
9520 get_secondary_compatible_arch (bfd *abfd)
9522 obj_attribute *attr =
9523 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9525 /* Note: the tag and its argument below are uleb128 values, though
9526 currently-defined values fit in one byte for each. */
9528 && attr->s[0] == Tag_CPU_arch
9529 && (attr->s[1] & 128) != 128
9533 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9537 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9538 The tag is removed if ARCH is -1. */
9541 set_secondary_compatible_arch (bfd *abfd, int arch)
9543 obj_attribute *attr =
9544 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9552 /* Note: the tag and its argument below are uleb128 values, though
9553 currently-defined values fit in one byte for each. */
9555 attr->s = (char *) bfd_alloc (abfd, 3);
9556 attr->s[0] = Tag_CPU_arch;
9561 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9565 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9566 int newtag, int secondary_compat)
9568 #define T(X) TAG_CPU_ARCH_##X
9569 int tagl, tagh, result;
9572 T(V6T2), /* PRE_V4. */
9576 T(V6T2), /* V5TE. */
9577 T(V6T2), /* V5TEJ. */
9584 T(V6K), /* PRE_V4. */
9589 T(V6K), /* V5TEJ. */
9591 T(V6KZ), /* V6KZ. */
9597 T(V7), /* PRE_V4. */
9616 T(V6K), /* V5TEJ. */
9618 T(V6KZ), /* V6KZ. */
9631 T(V6K), /* V5TEJ. */
9633 T(V6KZ), /* V6KZ. */
9637 T(V6S_M), /* V6_M. */
9638 T(V6S_M) /* V6S_M. */
9640 const int v4t_plus_v6_m[] =
9646 T(V5TE), /* V5TE. */
9647 T(V5TEJ), /* V5TEJ. */
9649 T(V6KZ), /* V6KZ. */
9650 T(V6T2), /* V6T2. */
9653 T(V6_M), /* V6_M. */
9654 T(V6S_M), /* V6S_M. */
9655 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9664 /* Pseudo-architecture. */
9668 /* Check we've not got a higher architecture than we know about. */
9670 if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
9672 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9676 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9678 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9679 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9680 oldtag = T(V4T_PLUS_V6_M);
9682 /* And override the new tag if we have a Tag_also_compatible_with on the
9685 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9686 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9687 newtag = T(V4T_PLUS_V6_M);
9689 tagl = (oldtag < newtag) ? oldtag : newtag;
9690 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9692 /* Architectures before V6KZ add features monotonically. */
9693 if (tagh <= TAG_CPU_ARCH_V6KZ)
9696 result = comb[tagh - T(V6T2)][tagl];
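      /* Worked example (illustration only): combining T(V6T2) with T(V5TE)
	 yields T(V6T2), as shown by the V5TE entry in the first table
	 above, since V6T2 subsumes V5TE.  */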
9698 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9699 as the canonical version. */
9700 if (result == T(V4T_PLUS_V6_M))
9703 *secondary_compat_out = T(V6_M);
9706 *secondary_compat_out = -1;
9710 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9711 ibfd, oldtag, newtag);
9719 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9720 are conflicting attributes. */
9723 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9725 obj_attribute *in_attr;
9726 obj_attribute *out_attr;
9727 obj_attribute_list *in_list;
9728 obj_attribute_list *out_list;
9729 obj_attribute_list **out_listp;
9730 /* Some tags have 0 = don't care, 1 = strong requirement,
9731 2 = weak requirement. */
9732 static const int order_021[3] = {0, 2, 1};
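  /* Worked example (illustration only): merging a weak requirement (2)
     into a strong one (1) keeps 1, because order_021[2] < order_021[1];
     merging 2 into a don't-care (0) yields 2.  */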
9734 bfd_boolean result = TRUE;
9736 /* Skip the linker stubs file. This preserves previous behavior
9737 of accepting unknown attributes in the first input file - but
9739 if (ibfd->flags & BFD_LINKER_CREATED)
9742 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9744 /* This is the first object. Copy the attributes. */
9745 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9747 /* Use the Tag_null value to indicate the attributes have been
9749 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9754 in_attr = elf_known_obj_attributes_proc (ibfd);
9755 out_attr = elf_known_obj_attributes_proc (obfd);
9756 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9757 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9759 /* Ignore mismatches if the object doesn't use floating point. */
9760 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9761 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9762 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9765 (_("error: %B uses VFP register arguments, %B does not"),
9771 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9773 /* Merge this attribute with existing attributes. */
9776 case Tag_CPU_raw_name:
9778 /* These are merged after Tag_CPU_arch. */
9781 case Tag_ABI_optimization_goals:
9782 case Tag_ABI_FP_optimization_goals:
9783 /* Use the first value seen. */
9788 int secondary_compat = -1, secondary_compat_out = -1;
9789 unsigned int saved_out_attr = out_attr[i].i;
9790 static const char *name_table[] = {
9791 /* These aren't real CPU names, but we can't guess
9792 that from the architecture version alone. */
9808 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9809 secondary_compat = get_secondary_compatible_arch (ibfd);
9810 secondary_compat_out = get_secondary_compatible_arch (obfd);
9811 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9812 &secondary_compat_out,
9815 set_secondary_compatible_arch (obfd, secondary_compat_out);
9817 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9818 if (out_attr[i].i == saved_out_attr)
9819 ; /* Leave the names alone. */
9820 else if (out_attr[i].i == in_attr[i].i)
9822 /* The output architecture has been changed to match the
9823 input architecture. Use the input names. */
9824 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9825 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9827 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9828 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9833 out_attr[Tag_CPU_name].s = NULL;
9834 out_attr[Tag_CPU_raw_name].s = NULL;
9837 /* If we still don't have a value for Tag_CPU_name,
9838 make one up now. Tag_CPU_raw_name remains blank. */
9839 if (out_attr[Tag_CPU_name].s == NULL
9840 && out_attr[i].i < ARRAY_SIZE (name_table))
9841 out_attr[Tag_CPU_name].s =
9842 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9846 case Tag_ARM_ISA_use:
9847 case Tag_THUMB_ISA_use:
9849 case Tag_Advanced_SIMD_arch:
9850 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9851 case Tag_ABI_FP_rounding:
9852 case Tag_ABI_FP_exceptions:
9853 case Tag_ABI_FP_user_exceptions:
9854 case Tag_ABI_FP_number_model:
9855 case Tag_VFP_HP_extension:
9856 case Tag_CPU_unaligned_access:
9858 case Tag_Virtualization_use:
9859 case Tag_MPextension_use:
9860 /* Use the largest value specified. */
9861 if (in_attr[i].i > out_attr[i].i)
9862 out_attr[i].i = in_attr[i].i;
9865 case Tag_ABI_align8_preserved:
9866 case Tag_ABI_PCS_RO_data:
9867 /* Use the smallest value specified. */
9868 if (in_attr[i].i < out_attr[i].i)
9869 out_attr[i].i = in_attr[i].i;
9872 case Tag_ABI_align8_needed:
9873 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9874 && (in_attr[Tag_ABI_align8_preserved].i == 0
9875 || out_attr[Tag_ABI_align8_preserved].i == 0))
9877 /* This error message should be enabled once all non-conformant
9878 binaries in the toolchain have had the attributes set
9881 (_("error: %B: 8-byte data alignment conflicts with %B"),
9886 case Tag_ABI_FP_denormal:
9887 case Tag_ABI_PCS_GOT_use:
9888 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9889 value if greater than 2 (for future-proofing). */
9890 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9891 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9892 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9893 out_attr[i].i = in_attr[i].i;
9897 case Tag_CPU_arch_profile:
9898 if (out_attr[i].i != in_attr[i].i)
9900 /* 0 will merge with anything.
9901 'A' and 'S' merge to 'A'.
9902 'R' and 'S' merge to 'R'.
9903 'M' and 'A|R|S' is an error. */
9904 if (out_attr[i].i == 0
9905 || (out_attr[i].i == 'S'
9906 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9907 out_attr[i].i = in_attr[i].i;
9908 else if (in_attr[i].i == 0
9909 || (in_attr[i].i == 'S'
9910 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9915 (_("error: %B: Conflicting architecture profiles %c/%c"),
9917 in_attr[i].i ? in_attr[i].i : '0',
9918 out_attr[i].i ? out_attr[i].i : '0');
9943 /* Values greater than 6 aren't defined, so just pick the
9945 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
9947 out_attr[i] = in_attr[i];
9950 /* The output uses the superset of input features
9951 (ISA version) and registers. */
9952 ver = vfp_versions[in_attr[i].i].ver;
9953 if (ver < vfp_versions[out_attr[i].i].ver)
9954 ver = vfp_versions[out_attr[i].i].ver;
9955 regs = vfp_versions[in_attr[i].i].regs;
9956 if (regs < vfp_versions[out_attr[i].i].regs)
9957 regs = vfp_versions[out_attr[i].i].regs;
9958 /* This assumes all possible supersets are also a valid
9960 for (newval = 6; newval > 0; newval--)
9962 if (regs == vfp_versions[newval].regs
9963 && ver == vfp_versions[newval].ver)
9966 out_attr[i].i = newval;
9969 case Tag_PCS_config:
9970 if (out_attr[i].i == 0)
9971 out_attr[i].i = in_attr[i].i;
9972 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
9974 /* It's sometimes ok to mix different configs, so this is only
9977 (_("Warning: %B: Conflicting platform configuration"), ibfd);
9980 case Tag_ABI_PCS_R9_use:
9981 if (in_attr[i].i != out_attr[i].i
9982 && out_attr[i].i != AEABI_R9_unused
9983 && in_attr[i].i != AEABI_R9_unused)
9986 (_("error: %B: Conflicting use of R9"), ibfd);
9989 if (out_attr[i].i == AEABI_R9_unused)
9990 out_attr[i].i = in_attr[i].i;
9992 case Tag_ABI_PCS_RW_data:
9993 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
9994 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
9995 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
9998 (_("error: %B: SB relative addressing conflicts with use of R9"),
10002 /* Use the smallest value specified. */
10003 if (in_attr[i].i < out_attr[i].i)
10004 out_attr[i].i = in_attr[i].i;
10006 case Tag_ABI_PCS_wchar_t:
10007 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10008 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10011 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10012 ibfd, in_attr[i].i, out_attr[i].i);
10014 else if (in_attr[i].i && !out_attr[i].i)
10015 out_attr[i].i = in_attr[i].i;
10017 case Tag_ABI_enum_size:
10018 if (in_attr[i].i != AEABI_enum_unused)
10020 if (out_attr[i].i == AEABI_enum_unused
10021 || out_attr[i].i == AEABI_enum_forced_wide)
10023 /* The existing object is compatible with anything.
10024 Use whatever requirements the new object has. */
10025 out_attr[i].i = in_attr[i].i;
10027 else if (in_attr[i].i != AEABI_enum_forced_wide
10028 && out_attr[i].i != in_attr[i].i
10029 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10031 static const char *aeabi_enum_names[] =
10032 { "", "variable-size", "32-bit", "" };
10033 const char *in_name =
10034 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10035 ? aeabi_enum_names[in_attr[i].i]
10037 const char *out_name =
10038 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10039 ? aeabi_enum_names[out_attr[i].i]
10042 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10043 ibfd, in_name, out_name);
10047 case Tag_ABI_VFP_args:
10050 case Tag_ABI_WMMX_args:
10051 if (in_attr[i].i != out_attr[i].i)
10054 (_("error: %B uses iWMMXt register arguments, %B does not"),
10059 case Tag_compatibility:
10060 /* Merged in target-independent code. */
10062 case Tag_ABI_HardFP_use:
10063 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
10064 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
10065 || (in_attr[i].i == 2 && out_attr[i].i == 1))
10067 else if (in_attr[i].i > out_attr[i].i)
10068 out_attr[i].i = in_attr[i].i;
10070 case Tag_ABI_FP_16bit_format:
10071 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10073 if (in_attr[i].i != out_attr[i].i)
10076 (_("error: fp16 format mismatch between %B and %B"),
10081 if (in_attr[i].i != 0)
10082 out_attr[i].i = in_attr[i].i;
10085 case Tag_nodefaults:
10086 /* This tag is set if it exists, but the value is unused (and is
10087 typically zero). We don't actually need to do anything here -
10088 the merge happens automatically when the type flags are merged
10091 case Tag_also_compatible_with:
10092 /* Already done in Tag_CPU_arch. */
10094 case Tag_conformance:
10095 /* Keep the attribute if it matches. Throw it away otherwise.
10096 No attribute means no claim to conform. */
10097 if (!in_attr[i].s || !out_attr[i].s
10098 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10099 out_attr[i].s = NULL;
10104 bfd *err_bfd = NULL;
10106 /* The "known_obj_attributes" table does contain some undefined
10107 attributes. Ensure that they are unused. */
10108 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
10110 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
10113 if (err_bfd != NULL)
10115 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10116 if ((i & 127) < 64)
10119 (_("%B: Unknown mandatory EABI object attribute %d"),
10121 bfd_set_error (bfd_error_bad_value);
10127 (_("Warning: %B: Unknown EABI object attribute %d"),
10132 /* Only pass on attributes that match in both inputs. */
10133 if (in_attr[i].i != out_attr[i].i
10134 || in_attr[i].s != out_attr[i].s
10135 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10136 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10139 out_attr[i].s = NULL;
10144 /* If out_attr was copied from in_attr then it won't have a type yet. */
10145 if (in_attr[i].type && !out_attr[i].type)
10146 out_attr[i].type = in_attr[i].type;
10149 /* Merge Tag_compatibility attributes and any common GNU ones. */
10150 _bfd_elf_merge_object_attributes (ibfd, obfd);
10152 /* Check for any attributes not known on ARM. */
10153 in_list = elf_other_obj_attributes_proc (ibfd);
10154 out_listp = &elf_other_obj_attributes_proc (obfd);
10155 out_list = *out_listp;
10157 for (; in_list || out_list; )
10159 bfd *err_bfd = NULL;
10162 /* The tags for each list are in numerical order. */
10163 /* If the tags are equal, then merge. */
10164 if (out_list && (!in_list || in_list->tag > out_list->tag))
10166 /* This attribute only exists in obfd. We can't merge, and we don't
10167 know what the tag means, so delete it. */
10169 err_tag = out_list->tag;
10170 *out_listp = out_list->next;
10171 out_list = *out_listp;
10173 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10175 /* This attribute only exists in ibfd. We can't merge, and we don't
10176 know what the tag means, so ignore it. */
10178 err_tag = in_list->tag;
10179 in_list = in_list->next;
10181 else /* The tags are equal. */
10183 /* At present, all attributes in the list are unknown, and
10184 therefore can't be merged meaningfully. */
10186 err_tag = out_list->tag;
10188 /* Only pass on attributes that match in both inputs. */
10189 if (in_list->attr.i != out_list->attr.i
10190 || in_list->attr.s != out_list->attr.s
10191 || (in_list->attr.s && out_list->attr.s
10192 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10194 /* No match. Delete the attribute. */
10195 *out_listp = out_list->next;
10196 out_list = *out_listp;
10200 /* Matched. Keep the attribute and move to the next. */
10201 out_list = out_list->next;
10202 in_list = in_list->next;
10208 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10209 if ((err_tag & 127) < 64)
10212 (_("%B: Unknown mandatory EABI object attribute %d"),
10214 bfd_set_error (bfd_error_bad_value);
10220 (_("Warning: %B: Unknown EABI object attribute %d"),
10229 /* Return TRUE if the two EABI versions are compatible. */
10232 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10234 /* v4 and v5 are the same spec before and after it was released,
10235 so allow mixing them. */
10236 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10237 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10240 return (iver == over);
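/* For example (illustration only): mixing EF_ARM_EABI_VER4 with
   EF_ARM_EABI_VER5 is accepted, while any other pair of differing
   versions is rejected.  */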
10243 /* Merge backend specific data from an object file to the output
10244 object file when linking. */
10247 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10249 /* Display the flags field. */
10252 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10254 FILE * file = (FILE *) ptr;
10255 unsigned long flags;
10257 BFD_ASSERT (abfd != NULL && ptr != NULL);
10259 /* Print normal ELF private data. */
10260 _bfd_elf_print_private_bfd_data (abfd, ptr);
10262 flags = elf_elfheader (abfd)->e_flags;
10263 /* Ignore init flag - it may not be set, despite the flags field
10264 containing valid data. */
10266 /* xgettext:c-format */
10267 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10269 switch (EF_ARM_EABI_VERSION (flags))
10271 case EF_ARM_EABI_UNKNOWN:
10272 /* The following flag bits are GNU extensions and not part of the
10273 official ARM ELF extended ABI. Hence they are only decoded if
10274 the EABI version is not set. */
10275 if (flags & EF_ARM_INTERWORK)
10276 fprintf (file, _(" [interworking enabled]"));
10278 if (flags & EF_ARM_APCS_26)
10279 fprintf (file, " [APCS-26]");
10281 fprintf (file, " [APCS-32]");
10283 if (flags & EF_ARM_VFP_FLOAT)
10284 fprintf (file, _(" [VFP float format]"));
10285 else if (flags & EF_ARM_MAVERICK_FLOAT)
10286 fprintf (file, _(" [Maverick float format]"));
10288 fprintf (file, _(" [FPA float format]"));
10290 if (flags & EF_ARM_APCS_FLOAT)
10291 fprintf (file, _(" [floats passed in float registers]"));
10293 if (flags & EF_ARM_PIC)
10294 fprintf (file, _(" [position independent]"));
10296 if (flags & EF_ARM_NEW_ABI)
10297 fprintf (file, _(" [new ABI]"));
10299 if (flags & EF_ARM_OLD_ABI)
10300 fprintf (file, _(" [old ABI]"));
10302 if (flags & EF_ARM_SOFT_FLOAT)
10303 fprintf (file, _(" [software FP]"));
10305 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10306 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10307 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10308 | EF_ARM_MAVERICK_FLOAT);
10311 case EF_ARM_EABI_VER1:
10312 fprintf (file, _(" [Version1 EABI]"));
10314 if (flags & EF_ARM_SYMSARESORTED)
10315 fprintf (file, _(" [sorted symbol table]"));
10317 fprintf (file, _(" [unsorted symbol table]"));
10319 flags &= ~ EF_ARM_SYMSARESORTED;
10322 case EF_ARM_EABI_VER2:
10323 fprintf (file, _(" [Version2 EABI]"));
10325 if (flags & EF_ARM_SYMSARESORTED)
10326 fprintf (file, _(" [sorted symbol table]"));
10328 fprintf (file, _(" [unsorted symbol table]"));
10330 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10331 fprintf (file, _(" [dynamic symbols use segment index]"));
10333 if (flags & EF_ARM_MAPSYMSFIRST)
10334 fprintf (file, _(" [mapping symbols precede others]"));
10336 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10337 | EF_ARM_MAPSYMSFIRST);
10340 case EF_ARM_EABI_VER3:
10341 fprintf (file, _(" [Version3 EABI]"));
10344 case EF_ARM_EABI_VER4:
10345 fprintf (file, _(" [Version4 EABI]"));
10348 case EF_ARM_EABI_VER5:
10349 fprintf (file, _(" [Version5 EABI]"));
10351 if (flags & EF_ARM_BE8)
10352 fprintf (file, _(" [BE8]"));
10354 if (flags & EF_ARM_LE8)
10355 fprintf (file, _(" [LE8]"));
10357 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10361 fprintf (file, _(" <EABI version unrecognised>"));
10365 flags &= ~ EF_ARM_EABIMASK;
10367 if (flags & EF_ARM_RELEXEC)
10368 fprintf (file, _(" [relocatable executable]"));
10370 if (flags & EF_ARM_HASENTRY)
10371 fprintf (file, _(" [has entry point]"));
10373 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10376 fprintf (file, _("<Unrecognised flag bits set>"));
10378 fputc ('\n', file);
10384 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10386 switch (ELF_ST_TYPE (elf_sym->st_info))
10388 case STT_ARM_TFUNC:
10389 return ELF_ST_TYPE (elf_sym->st_info);
10391 case STT_ARM_16BIT:
10392 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10393 This allows us to distinguish between data used by Thumb instructions
10394 and non-data (which is probably code) inside Thumb regions of an
10396 if (type != STT_OBJECT && type != STT_TLS)
10397 return ELF_ST_TYPE (elf_sym->st_info);
10408 elf32_arm_gc_mark_hook (asection *sec,
10409 struct bfd_link_info *info,
10410 Elf_Internal_Rela *rel,
10411 struct elf_link_hash_entry *h,
10412 Elf_Internal_Sym *sym)
10415 switch (ELF32_R_TYPE (rel->r_info))
10417 case R_ARM_GNU_VTINHERIT:
10418 case R_ARM_GNU_VTENTRY:
10422 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10425 /* Update the got entry reference counts for the section being removed. */
10428 elf32_arm_gc_sweep_hook (bfd * abfd,
10429 struct bfd_link_info * info,
10431 const Elf_Internal_Rela * relocs)
10433 Elf_Internal_Shdr *symtab_hdr;
10434 struct elf_link_hash_entry **sym_hashes;
10435 bfd_signed_vma *local_got_refcounts;
10436 const Elf_Internal_Rela *rel, *relend;
10437 struct elf32_arm_link_hash_table * globals;
10439 if (info->relocatable)
10442 globals = elf32_arm_hash_table (info);
10444 elf_section_data (sec)->local_dynrel = NULL;
10446 symtab_hdr = & elf_symtab_hdr (abfd);
10447 sym_hashes = elf_sym_hashes (abfd);
10448 local_got_refcounts = elf_local_got_refcounts (abfd);
10450 check_use_blx (globals);
10452 relend = relocs + sec->reloc_count;
10453 for (rel = relocs; rel < relend; rel++)
10455 unsigned long r_symndx;
10456 struct elf_link_hash_entry *h = NULL;
10459 r_symndx = ELF32_R_SYM (rel->r_info);
10460 if (r_symndx >= symtab_hdr->sh_info)
10462 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10463 while (h->root.type == bfd_link_hash_indirect
10464 || h->root.type == bfd_link_hash_warning)
10465 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10468 r_type = ELF32_R_TYPE (rel->r_info);
10469 r_type = arm_real_reloc_type (globals, r_type);
10473 case R_ARM_GOT_PREL:
10474 case R_ARM_TLS_GD32:
10475 case R_ARM_TLS_IE32:
10478 if (h->got.refcount > 0)
10479 h->got.refcount -= 1;
10481 else if (local_got_refcounts != NULL)
10483 if (local_got_refcounts[r_symndx] > 0)
10484 local_got_refcounts[r_symndx] -= 1;
10488 case R_ARM_TLS_LDM32:
10489 elf32_arm_hash_table (info)->tls_ldm_got.refcount -= 1;
10493 case R_ARM_ABS32_NOI:
10495 case R_ARM_REL32_NOI:
10501 case R_ARM_THM_CALL:
10502 case R_ARM_THM_JUMP24:
10503 case R_ARM_THM_JUMP19:
10504 case R_ARM_MOVW_ABS_NC:
10505 case R_ARM_MOVT_ABS:
10506 case R_ARM_MOVW_PREL_NC:
10507 case R_ARM_MOVT_PREL:
10508 case R_ARM_THM_MOVW_ABS_NC:
10509 case R_ARM_THM_MOVT_ABS:
10510 case R_ARM_THM_MOVW_PREL_NC:
10511 case R_ARM_THM_MOVT_PREL:
10512 /* Should the interworking branches be here also? */
10516 struct elf32_arm_link_hash_entry *eh;
10517 struct elf32_arm_relocs_copied **pp;
10518 struct elf32_arm_relocs_copied *p;
10520 eh = (struct elf32_arm_link_hash_entry *) h;
10522 if (h->plt.refcount > 0)
10524 h->plt.refcount -= 1;
10525 if (r_type == R_ARM_THM_CALL)
10526 eh->plt_maybe_thumb_refcount--;
10528 if (r_type == R_ARM_THM_JUMP24
10529 || r_type == R_ARM_THM_JUMP19)
10530 eh->plt_thumb_refcount--;
10533 if (r_type == R_ARM_ABS32
10534 || r_type == R_ARM_REL32
10535 || r_type == R_ARM_ABS32_NOI
10536 || r_type == R_ARM_REL32_NOI)
10538 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10540 if (p->section == sec)
10543 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10544 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10562 /* Look through the relocs for a section during the first phase. */
10565 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10566 asection *sec, const Elf_Internal_Rela *relocs)
10568 Elf_Internal_Shdr *symtab_hdr;
10569 struct elf_link_hash_entry **sym_hashes;
10570 const Elf_Internal_Rela *rel;
10571 const Elf_Internal_Rela *rel_end;
10574 bfd_vma *local_got_offsets;
10575 struct elf32_arm_link_hash_table *htab;
10576 bfd_boolean needs_plt;
10577 unsigned long nsyms;
10579 if (info->relocatable)
10582 BFD_ASSERT (is_arm_elf (abfd));
10584 htab = elf32_arm_hash_table (info);
10587 /* Create dynamic sections for relocatable executables so that we can
10588 copy relocations. */
10589 if (htab->root.is_relocatable_executable
10590 && ! htab->root.dynamic_sections_created)
10592 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10596 dynobj = elf_hash_table (info)->dynobj;
10597 local_got_offsets = elf_local_got_offsets (abfd);
10599 symtab_hdr = & elf_symtab_hdr (abfd);
10600 sym_hashes = elf_sym_hashes (abfd);
10601 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10603 rel_end = relocs + sec->reloc_count;
10604 for (rel = relocs; rel < rel_end; rel++)
10606 struct elf_link_hash_entry *h;
10607 struct elf32_arm_link_hash_entry *eh;
10608 unsigned long r_symndx;
10611 r_symndx = ELF32_R_SYM (rel->r_info);
10612 r_type = ELF32_R_TYPE (rel->r_info);
10613 r_type = arm_real_reloc_type (htab, r_type);
10615 if (r_symndx >= nsyms
10616 /* PR 9934: It is possible to have relocations that do not
10617 refer to symbols, thus it is also possible to have an
10618 object file containing relocations but no symbol table. */
10619 && (r_symndx > 0 || nsyms > 0))
10621 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10626 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10630 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10631 while (h->root.type == bfd_link_hash_indirect
10632 || h->root.type == bfd_link_hash_warning)
10633 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10636 eh = (struct elf32_arm_link_hash_entry *) h;
10641 case R_ARM_GOT_PREL:
10642 case R_ARM_TLS_GD32:
10643 case R_ARM_TLS_IE32:
10644 /* This symbol requires a global offset table entry. */
10646 int tls_type, old_tls_type;
10650 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10651 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10652 default: tls_type = GOT_NORMAL; break;
10658 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10662 bfd_signed_vma *local_got_refcounts;
10664 /* This is a global offset table entry for a local symbol. */
10665 local_got_refcounts = elf_local_got_refcounts (abfd);
10666 if (local_got_refcounts == NULL)
10668 bfd_size_type size;
10670 size = symtab_hdr->sh_info;
10671 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10672 local_got_refcounts = (bfd_signed_vma *)
10673 bfd_zalloc (abfd, size);
10674 if (local_got_refcounts == NULL)
10676 elf_local_got_refcounts (abfd) = local_got_refcounts;
10677 elf32_arm_local_got_tls_type (abfd)
10678 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10680 local_got_refcounts[r_symndx] += 1;
10681 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10684 /* We will already have issued an error message if there is a
10685 TLS / non-TLS mismatch, based on the symbol type. We don't
10686 support any linker relaxations. So just combine any TLS
10688 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10689 && tls_type != GOT_NORMAL)
10690 tls_type |= old_tls_type;
10692 if (old_tls_type != tls_type)
10695 elf32_arm_hash_entry (h)->tls_type = tls_type;
10697 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10700 /* Fall through. */
10702 case R_ARM_TLS_LDM32:
10703 if (r_type == R_ARM_TLS_LDM32)
10704 htab->tls_ldm_got.refcount++;
10705 /* Fall through. */
10707 case R_ARM_GOTOFF32:
10709 if (htab->sgot == NULL)
10711 if (htab->root.dynobj == NULL)
10712 htab->root.dynobj = abfd;
10713 if (!create_got_section (htab->root.dynobj, info))
10719 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10720 ldr __GOTT_INDEX__ offsets. */
10721 if (!htab->vxworks_p)
10723 /* Fall through. */
10730 case R_ARM_THM_CALL:
10731 case R_ARM_THM_JUMP24:
10732 case R_ARM_THM_JUMP19:
10736 case R_ARM_MOVW_ABS_NC:
10737 case R_ARM_MOVT_ABS:
10738 case R_ARM_THM_MOVW_ABS_NC:
10739 case R_ARM_THM_MOVT_ABS:
10742 (*_bfd_error_handler)
10743 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10744 abfd, elf32_arm_howto_table_1[r_type].name,
10745 (h) ? h->root.root.string : "a local symbol");
10746 bfd_set_error (bfd_error_bad_value);
10750 /* Fall through. */
10752 case R_ARM_ABS32_NOI:
10754 case R_ARM_REL32_NOI:
10755 case R_ARM_MOVW_PREL_NC:
10756 case R_ARM_MOVT_PREL:
10757 case R_ARM_THM_MOVW_PREL_NC:
10758 case R_ARM_THM_MOVT_PREL:
10762 /* Should the interworking branches be listed here? */
10765 /* If this reloc is in a read-only section, we might
10766 need a copy reloc. We can't check reliably at this
10767 stage whether the section is read-only, as input
10768 sections have not yet been mapped to output sections.
10769 Tentatively set the flag for now, and correct in
10770 adjust_dynamic_symbol. */
10772 h->non_got_ref = 1;
10774 /* We may need a .plt entry if the function this reloc
10775 refers to is in a different object. We can't tell for
10776 sure yet, because something later might force the
10781 /* If we create a PLT entry, this relocation will reference
10782 it, even if it's an ABS32 relocation. */
10783 h->plt.refcount += 1;
10785 /* It's too early to use htab->use_blx here, so we have to
10786 record possible blx references separately from
10787 relocs that definitely need a thumb stub. */
10789 if (r_type == R_ARM_THM_CALL)
10790 eh->plt_maybe_thumb_refcount += 1;
10792 if (r_type == R_ARM_THM_JUMP24
10793 || r_type == R_ARM_THM_JUMP19)
10794 eh->plt_thumb_refcount += 1;
10797 /* If we are creating a shared library or relocatable executable,
10798 and this is a reloc against a global symbol, or a non PC
10799 relative reloc against a local symbol, then we need to copy
10800 the reloc into the shared library. However, if we are linking
10801 with -Bsymbolic, we do not need to copy a reloc against a
10802 global symbol which is defined in an object we are
10803 including in the link (i.e., DEF_REGULAR is set). At
10804 this point we have not seen all the input files, so it is
10805 possible that DEF_REGULAR is not set now but will be set
10806 later (it is never cleared). We account for that
10807 possibility below by storing information in the
10808 relocs_copied field of the hash table entry. */
10809 if ((info->shared || htab->root.is_relocatable_executable)
10810 && (sec->flags & SEC_ALLOC) != 0
10811 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10812 || (h != NULL && ! h->needs_plt
10813 && (! info->symbolic || ! h->def_regular))))
10815 struct elf32_arm_relocs_copied *p, **head;
10817 /* When creating a shared object, we must copy these
10818 reloc types into the output file. We create a reloc
10819 section in dynobj and make room for this reloc. */
10820 if (sreloc == NULL)
10822 sreloc = _bfd_elf_make_dynamic_reloc_section
10823 (sec, dynobj, 2, abfd, ! htab->use_rel);
10825 if (sreloc == NULL)
10828 /* BPABI objects never have dynamic relocations mapped. */
10829 if (htab->symbian_p)
10833 flags = bfd_get_section_flags (dynobj, sreloc);
10834 flags &= ~(SEC_LOAD | SEC_ALLOC);
10835 bfd_set_section_flags (dynobj, sreloc, flags);
10839 /* If this is a global symbol, we count the number of
10840 relocations we need for this symbol. */
10843 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
10847 /* Track dynamic relocs needed for local syms too.
10848 We really need local syms available to do this
10849 easily. Oh well. */
10852 Elf_Internal_Sym *isym;
10854 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
10859 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
10863 vpp = &elf_section_data (s)->local_dynrel;
10864 head = (struct elf32_arm_relocs_copied **) vpp;
10868 if (p == NULL || p->section != sec)
10870 bfd_size_type amt = sizeof *p;
10872 p = (struct elf32_arm_relocs_copied *)
10873 bfd_alloc (htab->root.dynobj, amt);
10883 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10889 /* This relocation describes the C++ object vtable hierarchy.
10890 Reconstruct it for later use during GC. */
10891 case R_ARM_GNU_VTINHERIT:
10892 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
10896 /* This relocation describes which C++ vtable entries are actually
10897 used. Record for later use during GC. */
10898 case R_ARM_GNU_VTENTRY:
10899 BFD_ASSERT (h != NULL);
10901 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
10910 /* Unwinding tables are not referenced directly. This pass marks them as
10911 required if the corresponding code section is marked. */
10914 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
10915 elf_gc_mark_hook_fn gc_mark_hook)
10918 Elf_Internal_Shdr **elf_shdrp;
10921 /* Marking EH data may cause additional code sections to be marked,
10922 requiring multiple passes. */
10927 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
10931 if (! is_arm_elf (sub))
10934 elf_shdrp = elf_elfsections (sub);
10935 for (o = sub->sections; o != NULL; o = o->next)
10937 Elf_Internal_Shdr *hdr;
10939 hdr = &elf_section_data (o)->this_hdr;
10940 if (hdr->sh_type == SHT_ARM_EXIDX
10942 && hdr->sh_link < elf_numsections (sub)
10944 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
10947 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
10957 /* Treat mapping symbols as special target symbols. */
10960 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
10962 return bfd_is_arm_special_symbol_name (sym->name,
10963 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
10966 /* This is a copy of elf_find_function() from elf.c except that
10967 ARM mapping symbols are ignored when looking for function names
10968 and STT_ARM_TFUNC is considered to be a function type. */
10971 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
10972 asection * section,
10973 asymbol ** symbols,
10975 const char ** filename_ptr,
10976 const char ** functionname_ptr)
10978 const char * filename = NULL;
10979 asymbol * func = NULL;
10980 bfd_vma low_func = 0;
10983 for (p = symbols; *p != NULL; p++)
10985 elf_symbol_type *q;
10987 q = (elf_symbol_type *) *p;
10989 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
10994 filename = bfd_asymbol_name (&q->symbol);
10997 case STT_ARM_TFUNC:
10999 /* Skip mapping symbols. */
11000 if ((q->symbol.flags & BSF_LOCAL)
11001 && bfd_is_arm_special_symbol_name (q->symbol.name,
11002 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11004 /* Fall through. */
11005 if (bfd_get_section (&q->symbol) == section
11006 && q->symbol.value >= low_func
11007 && q->symbol.value <= offset)
11009 func = (asymbol *) q;
11010 low_func = q->symbol.value;
11020 *filename_ptr = filename;
11021 if (functionname_ptr)
11022 *functionname_ptr = bfd_asymbol_name (func);
11028 /* Find the nearest line to a particular section and offset, for error
11029 reporting. This code is a duplicate of the code in elf.c, except
11030 that it uses arm_elf_find_function. */
11033 elf32_arm_find_nearest_line (bfd * abfd,
11034 asection * section,
11035 asymbol ** symbols,
11037 const char ** filename_ptr,
11038 const char ** functionname_ptr,
11039 unsigned int * line_ptr)
11041 bfd_boolean found = FALSE;
11043 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11045 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11046 filename_ptr, functionname_ptr,
11048 & elf_tdata (abfd)->dwarf2_find_line_info))
11050 if (!*functionname_ptr)
11051 arm_elf_find_function (abfd, section, symbols, offset,
11052 *filename_ptr ? NULL : filename_ptr,
11058 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11059 & found, filename_ptr,
11060 functionname_ptr, line_ptr,
11061 & elf_tdata (abfd)->line_info))
11064 if (found && (*functionname_ptr || *line_ptr))
11067 if (symbols == NULL)
11070 if (! arm_elf_find_function (abfd, section, symbols, offset,
11071 filename_ptr, functionname_ptr))
11079 elf32_arm_find_inliner_info (bfd * abfd,
11080 const char ** filename_ptr,
11081 const char ** functionname_ptr,
11082 unsigned int * line_ptr)
11085 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11086 functionname_ptr, line_ptr,
11087 & elf_tdata (abfd)->dwarf2_find_line_info);
11091 /* Adjust a symbol defined by a dynamic object and referenced by a
11092 regular object. The current definition is in some section of the
11093 dynamic object, but we're not including those sections. We have to
11094 change the definition to something the rest of the link can
11098 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11099 struct elf_link_hash_entry * h)
11103 struct elf32_arm_link_hash_entry * eh;
11104 struct elf32_arm_link_hash_table *globals;
11106 globals = elf32_arm_hash_table (info);
11107 dynobj = elf_hash_table (info)->dynobj;
11109 /* Make sure we know what is going on here. */
11110 BFD_ASSERT (dynobj != NULL
11112 || h->u.weakdef != NULL
11115 && !h->def_regular)));
11117 eh = (struct elf32_arm_link_hash_entry *) h;
11119 /* If this is a function, put it in the procedure linkage table. We
11120 will fill in the contents of the procedure linkage table later,
11121 when we know the address of the .got section. */
11122 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11125 if (h->plt.refcount <= 0
11126 || SYMBOL_CALLS_LOCAL (info, h)
11127 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11128 && h->root.type == bfd_link_hash_undefweak))
11130 /* This case can occur if we saw a PLT32 reloc in an input
11131 file, but the symbol was never referred to by a dynamic
11132 object, or if all references were garbage collected. In
11133 such a case, we don't actually need to build a procedure
11134 linkage table, and we can just do a PC24 reloc instead. */
11135 h->plt.offset = (bfd_vma) -1;
11136 eh->plt_thumb_refcount = 0;
11137 eh->plt_maybe_thumb_refcount = 0;
11145 /* It's possible that we incorrectly decided a .plt reloc was
11146 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11147 in check_relocs. We can't decide accurately between function
11148 and non-function syms in check_relocs; objects loaded later in
11149 the link may change h->type. So fix it now. */
11150 h->plt.offset = (bfd_vma) -1;
11151 eh->plt_thumb_refcount = 0;
11152 eh->plt_maybe_thumb_refcount = 0;
11155 /* If this is a weak symbol, and there is a real definition, the
11156 processor independent code will have arranged for us to see the
11157 real definition first, and we can just use the same value. */
11158 if (h->u.weakdef != NULL)
11160 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11161 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11162 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11163 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11167 /* If there are no non-GOT references, we do not need a copy
11169 if (!h->non_got_ref)
11172 /* This is a reference to a symbol defined by a dynamic object which
11173 is not a function. */
11175 /* If we are creating a shared library, we must presume that the
11176 only references to the symbol are via the global offset table.
11177 For such cases we need not do anything here; the relocations will
11178 be handled correctly by relocate_section. Relocatable executables
11179 can reference data in shared objects directly, so we don't need to
11180 do anything here. */
11181 if (info->shared || globals->root.is_relocatable_executable)
11186 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11187 h->root.root.string);
11191 /* We must allocate the symbol in our .dynbss section, which will
11192 become part of the .bss section of the executable. There will be
11193 an entry for this symbol in the .dynsym section. The dynamic
11194 object will contain position independent code, so all references
11195 from the dynamic object to this symbol will go through the global
11196 offset table. The dynamic linker will use the .dynsym entry to
11197 determine the address it must put in the global offset table, so
11198 both the dynamic object and the regular object will refer to the
11199 same memory location for the variable. */
11200 s = bfd_get_section_by_name (dynobj, ".dynbss");
11201 BFD_ASSERT (s != NULL);
11203 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11204 copy the initial value out of the dynamic object and into the
11205 runtime process image. We need to remember the offset into the
11206 .rel(a).bss section we are going to use. */
11207 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11211 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11212 BFD_ASSERT (srel != NULL);
11213 srel->size += RELOC_SIZE (globals);
11217 return _bfd_elf_adjust_dynamic_copy (h, s);
11220 /* Allocate space in .plt, .got and associated reloc sections for
11224 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11226 struct bfd_link_info *info;
11227 struct elf32_arm_link_hash_table *htab;
11228 struct elf32_arm_link_hash_entry *eh;
11229 struct elf32_arm_relocs_copied *p;
11230 bfd_signed_vma thumb_refs;
11232 eh = (struct elf32_arm_link_hash_entry *) h;
11234 if (h->root.type == bfd_link_hash_indirect)
11237 if (h->root.type == bfd_link_hash_warning)
11238 /* When warning symbols are created, they **replace** the "real"
11239 entry in the hash table, thus we never get to see the real
11240 symbol in a hash traversal. So look at it now. */
11241 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11243 info = (struct bfd_link_info *) inf;
11244 htab = elf32_arm_hash_table (info);
11246 if (htab->root.dynamic_sections_created
11247 && h->plt.refcount > 0)
11249 /* Make sure this symbol is output as a dynamic symbol.
11250 Undefined weak syms won't yet be marked as dynamic. */
11251 if (h->dynindx == -1
11252 && !h->forced_local)
11254 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11259 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11261 asection *s = htab->splt;
11263 /* If this is the first .plt entry, make room for the special first entry. */
11266 s->size += htab->plt_header_size;
11268 h->plt.offset = s->size;
11270 /* If we will insert a Thumb trampoline before this PLT, leave room for it. */
11272 thumb_refs = eh->plt_thumb_refcount;
11273 if (!htab->use_blx)
11274 thumb_refs += eh->plt_maybe_thumb_refcount;
11276 if (thumb_refs > 0)
11278 h->plt.offset += PLT_THUMB_STUB_SIZE;
11279 s->size += PLT_THUMB_STUB_SIZE;
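/* Note: the reserved bytes hold a short Thumb-to-ARM trampoline, written
   later by elf32_arm_finish_dynamic_symbol immediately before the ARM code
   of the PLT entry, so that Thumb callers on cores without BLX can still
   reach it. */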
11282 /* If this symbol is not defined in a regular file, and we are
11283 not generating a shared library, then set the symbol to this
11284 location in the .plt. This is required to make function
11285 pointers compare as equal between the normal executable and
11286 the shared library. */
11288 && !h->def_regular)
11290 h->root.u.def.section = s;
11291 h->root.u.def.value = h->plt.offset;
11293 /* Make sure the function is not marked as Thumb, in case
11294 it is the target of an ABS32 relocation, which will
11295 point to the PLT entry. */
11296 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11297 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11300 /* Make room for this entry. */
11301 s->size += htab->plt_entry_size;
11303 if (!htab->symbian_p)
11305 /* We also need to make an entry in the .got.plt section, which
11306 will be placed in the .got section by the linker script. */
11307 eh->plt_got_offset = htab->sgotplt->size;
11308 htab->sgotplt->size += 4;
11311 /* We also need to make an entry in the .rel(a).plt section. */
11312 htab->srelplt->size += RELOC_SIZE (htab);
11314 /* VxWorks executables have a second set of relocations for
11315 each PLT entry. They go in a separate relocation section,
11316 which is processed by the kernel loader. */
11317 if (htab->vxworks_p && !info->shared)
11319 /* There is a relocation for the initial PLT entry:
11320 an R_ARM_ABS32 relocation for _GLOBAL_OFFSET_TABLE_. */
11321 if (h->plt.offset == htab->plt_header_size)
11322 htab->srelplt2->size += RELOC_SIZE (htab);
11324 /* There are two extra relocations for each subsequent
11325 PLT entry: an R_ARM_ABS32 relocation for the GOT entry,
11326 and an R_ARM_ABS32 relocation for the PLT entry. */
11327 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
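/* Taken together with the header relocation above, a VxWorks executable
   with N PLT entries should end up with 1 + 2 * N relocations in
   .rela.plt.unloaded; elf32_arm_finish_dynamic_sections walks them in
   exactly that order when it corrects their symbol indexes. */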
11332 h->plt.offset = (bfd_vma) -1;
11338 h->plt.offset = (bfd_vma) -1;
11342 if (h->got.refcount > 0)
11346 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11349 /* Make sure this symbol is output as a dynamic symbol.
11350 Undefined weak syms won't yet be marked as dynamic. */
11351 if (h->dynindx == -1
11352 && !h->forced_local)
11354 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11358 if (!htab->symbian_p)
11361 h->got.offset = s->size;
11363 if (tls_type == GOT_UNKNOWN)
11366 if (tls_type == GOT_NORMAL)
11367 /* Non-TLS symbols need one GOT slot. */
11371 if (tls_type & GOT_TLS_GD)
11372 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11374 if (tls_type & GOT_TLS_IE)
11375 /* R_ARM_TLS_IE32 needs one GOT slot. */
11379 dyn = htab->root.dynamic_sections_created;
11382 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11384 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11387 if (tls_type != GOT_NORMAL
11388 && (info->shared || indx != 0)
11389 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11390 || h->root.type != bfd_link_hash_undefweak))
11392 if (tls_type & GOT_TLS_IE)
11393 htab->srelgot->size += RELOC_SIZE (htab);
11395 if (tls_type & GOT_TLS_GD)
11396 htab->srelgot->size += RELOC_SIZE (htab);
11398 if ((tls_type & GOT_TLS_GD) && indx != 0)
11399 htab->srelgot->size += RELOC_SIZE (htab);
11401 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11402 || h->root.type != bfd_link_hash_undefweak)
11404 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11405 htab->srelgot->size += RELOC_SIZE (htab);
11409 h->got.offset = (bfd_vma) -1;
11411 /* Allocate stubs for exported Thumb functions on v4t. */
11412 if (!htab->use_blx && h->dynindx != -1
11414 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11415 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11417 struct elf_link_hash_entry * th;
11418 struct bfd_link_hash_entry * bh;
11419 struct elf_link_hash_entry * myh;
11423 /* Create a new symbol to register the real location of the function. */
11424 s = h->root.u.def.section;
11425 sprintf (name, "__real_%s", h->root.root.string);
11426 _bfd_generic_link_add_one_symbol (info, s->owner,
11427 name, BSF_GLOBAL, s,
11428 h->root.u.def.value,
11429 NULL, TRUE, FALSE, &bh);
11431 myh = (struct elf_link_hash_entry *) bh;
11432 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11433 myh->forced_local = 1;
11434 eh->export_glue = myh;
11435 th = record_arm_to_thumb_glue (info, h);
11436 /* Point the symbol at the stub. */
11437 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11438 h->root.u.def.section = th->root.u.def.section;
11439 h->root.u.def.value = th->root.u.def.value & ~1;
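/* Clearing bit zero strips the Thumb marker from the stub's address: the
   glue veneer is ARM code, which is also why the symbol type was switched
   to STT_FUNC above. */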
11442 if (eh->relocs_copied == NULL)
11445 /* In the shared -Bsymbolic case, discard space allocated for
11446 dynamic pc-relative relocs against symbols which turn out to be
11447 defined in regular objects. For the normal shared case, discard
11448 space for pc-relative relocs that have become local due to symbol
11449 visibility changes. */
11451 if (info->shared || htab->root.is_relocatable_executable)
11453 /* The only relocs that use pc_count are R_ARM_REL32 and
11454 R_ARM_REL32_NOI, which will appear on something like
11455 ".long foo - .". We want calls to protected symbols to resolve
11456 directly to the function rather than going via the plt. If people
11457 want function pointer comparisons to work as expected then they
11458 should avoid writing assembly like ".long foo - .". */
11459 if (SYMBOL_CALLS_LOCAL (info, h))
11461 struct elf32_arm_relocs_copied **pp;
11463 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11465 p->count -= p->pc_count;
11474 if (elf32_arm_hash_table (info)->vxworks_p)
11476 struct elf32_arm_relocs_copied **pp;
11478 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11480 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11487 /* Also discard relocs on undefined weak syms with non-default visibility. */
11489 if (eh->relocs_copied != NULL
11490 && h->root.type == bfd_link_hash_undefweak)
11492 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11493 eh->relocs_copied = NULL;
11495 /* Make sure undefined weak symbols are output as a dynamic symbol in PIEs. */
11497 else if (h->dynindx == -1
11498 && !h->forced_local)
11500 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11505 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11506 && h->root.type == bfd_link_hash_new)
11508 /* Output absolute symbols so that we can create relocations
11509 against them. For normal symbols we output a relocation
11510 against the section that contains them. */
11511 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11518 /* For the non-shared case, discard space for relocs against
11519 symbols which turn out to need copy relocs or are not dynamic. */
11522 if (!h->non_got_ref
11523 && ((h->def_dynamic
11524 && !h->def_regular)
11525 || (htab->root.dynamic_sections_created
11526 && (h->root.type == bfd_link_hash_undefweak
11527 || h->root.type == bfd_link_hash_undefined))))
11529 /* Make sure this symbol is output as a dynamic symbol.
11530 Undefined weak syms won't yet be marked as dynamic. */
11531 if (h->dynindx == -1
11532 && !h->forced_local)
11534 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11538 /* If that succeeded, we know we'll be keeping all the relocs. */
11540 if (h->dynindx != -1)
11544 eh->relocs_copied = NULL;
11549 /* Finally, allocate space. */
11550 for (p = eh->relocs_copied; p != NULL; p = p->next)
11552 asection *sreloc = elf_section_data (p->section)->sreloc;
11553 sreloc->size += p->count * RELOC_SIZE (htab);
11559 /* Find any dynamic relocs that apply to read-only sections. */
11562 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11564 struct elf32_arm_link_hash_entry * eh;
11565 struct elf32_arm_relocs_copied * p;
11567 if (h->root.type == bfd_link_hash_warning)
11568 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11570 eh = (struct elf32_arm_link_hash_entry *) h;
11571 for (p = eh->relocs_copied; p != NULL; p = p->next)
11573 asection *s = p->section;
11575 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11577 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11579 info->flags |= DF_TEXTREL;
11581 /* Not an error, just cut short the traversal. */
11589 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11592 struct elf32_arm_link_hash_table *globals;
11594 globals = elf32_arm_hash_table (info);
11595 globals->byteswap_code = byteswap_code;
11598 /* Set the sizes of the dynamic sections. */
11601 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11602 struct bfd_link_info * info)
11607 bfd_boolean relocs;
11609 struct elf32_arm_link_hash_table *htab;
11611 htab = elf32_arm_hash_table (info);
11612 dynobj = elf_hash_table (info)->dynobj;
11613 BFD_ASSERT (dynobj != NULL);
11614 check_use_blx (htab);
11616 if (elf_hash_table (info)->dynamic_sections_created)
11618 /* Set the contents of the .interp section to the interpreter. */
11619 if (info->executable)
11621 s = bfd_get_section_by_name (dynobj, ".interp");
11622 BFD_ASSERT (s != NULL);
11623 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11624 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11628 /* Set up .got offsets for local syms, and space for local dynamic relocs. */
11630 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11632 bfd_signed_vma *local_got;
11633 bfd_signed_vma *end_local_got;
11634 char *local_tls_type;
11635 bfd_size_type locsymcount;
11636 Elf_Internal_Shdr *symtab_hdr;
11638 bfd_boolean is_vxworks = elf32_arm_hash_table (info)->vxworks_p;
11640 if (! is_arm_elf (ibfd))
11643 for (s = ibfd->sections; s != NULL; s = s->next)
11645 struct elf32_arm_relocs_copied *p;
11647 for (p = (struct elf32_arm_relocs_copied *)
11648 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11650 if (!bfd_is_abs_section (p->section)
11651 && bfd_is_abs_section (p->section->output_section))
11653 /* Input section has been discarded, either because
11654 it is a copy of a linkonce section or due to
11655 linker script /DISCARD/, so we'll be discarding the relocs too. */
11658 else if (is_vxworks
11659 && strcmp (p->section->output_section->name,
11662 /* Relocations in vxworks .tls_vars sections are
11663 handled specially by the loader. */
11665 else if (p->count != 0)
11667 srel = elf_section_data (p->section)->sreloc;
11668 srel->size += p->count * RELOC_SIZE (htab);
11669 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11670 info->flags |= DF_TEXTREL;
11675 local_got = elf_local_got_refcounts (ibfd);
11679 symtab_hdr = & elf_symtab_hdr (ibfd);
11680 locsymcount = symtab_hdr->sh_info;
11681 end_local_got = local_got + locsymcount;
11682 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11684 srel = htab->srelgot;
11685 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11687 if (*local_got > 0)
11689 *local_got = s->size;
11690 if (*local_tls_type & GOT_TLS_GD)
11691 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11693 if (*local_tls_type & GOT_TLS_IE)
11695 if (*local_tls_type == GOT_NORMAL)
11698 if (info->shared || *local_tls_type == GOT_TLS_GD)
11699 srel->size += RELOC_SIZE (htab);
11702 *local_got = (bfd_vma) -1;
11706 if (htab->tls_ldm_got.refcount > 0)
11708 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11709 for R_ARM_TLS_LDM32 relocations. */
11710 htab->tls_ldm_got.offset = htab->sgot->size;
11711 htab->sgot->size += 8;
11713 htab->srelgot->size += RELOC_SIZE (htab);
11716 htab->tls_ldm_got.offset = -1;
11718 /* Allocate global sym .plt and .got entries, and space for global
11719 sym dynamic relocs. */
11720 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11722 /* Here we rummage through the found bfds to collect glue information. */
11723 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11725 if (! is_arm_elf (ibfd))
11728 /* Initialise mapping tables for code/data. */
11729 bfd_elf32_arm_init_maps (ibfd);
11731 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11732 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11733 /* xgettext:c-format */
11734 _bfd_error_handler (_("Errors encountered processing file %s"),
11738 /* Allocate space for the glue sections now that we've sized them. */
11739 bfd_elf32_arm_allocate_interworking_sections (info);
11741 /* The check_relocs and adjust_dynamic_symbol entry points have
11742 determined the sizes of the various dynamic sections. Allocate
11743 memory for them. */
11746 for (s = dynobj->sections; s != NULL; s = s->next)
11750 if ((s->flags & SEC_LINKER_CREATED) == 0)
11753 /* It's OK to base decisions on the section name, because none
11754 of the dynobj section names depend upon the input files. */
11755 name = bfd_get_section_name (dynobj, s);
11757 if (strcmp (name, ".plt") == 0)
11759 /* Remember whether there is a PLT. */
11760 plt = s->size != 0;
11762 else if (CONST_STRNEQ (name, ".rel"))
11766 /* Remember whether there are any reloc sections other
11767 than .rel(a).plt and .rela.plt.unloaded. */
11768 if (s != htab->srelplt && s != htab->srelplt2)
11771 /* We use the reloc_count field as a counter if we need
11772 to copy relocs into the output file. */
11773 s->reloc_count = 0;
11776 else if (! CONST_STRNEQ (name, ".got")
11777 && strcmp (name, ".dynbss") != 0)
11779 /* It's not one of our sections, so don't allocate space. */
11785 /* If we don't need this section, strip it from the
11786 output file. This is mostly to handle .rel(a).bss and
11787 .rel(a).plt. We must create both sections in
11788 create_dynamic_sections, because they must be created
11789 before the linker maps input sections to output
11790 sections. The linker does that before
11791 adjust_dynamic_symbol is called, and it is that
11792 function which decides whether anything needs to go
11793 into these sections. */
11794 s->flags |= SEC_EXCLUDE;
11798 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11801 /* Allocate memory for the section contents. */
11802 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
11803 if (s->contents == NULL)
11807 if (elf_hash_table (info)->dynamic_sections_created)
11809 /* Add some entries to the .dynamic section. We fill in the
11810 values later, in elf32_arm_finish_dynamic_sections, but we
11811 must add the entries now so that we get the correct size for
11812 the .dynamic section. The DT_DEBUG entry is filled in by the
11813 dynamic linker and used by the debugger. */
11814 #define add_dynamic_entry(TAG, VAL) \
11815 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11817 if (info->executable)
11819 if (!add_dynamic_entry (DT_DEBUG, 0))
11825 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11826 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11827 || !add_dynamic_entry (DT_PLTREL,
11828 htab->use_rel ? DT_REL : DT_RELA)
11829 || !add_dynamic_entry (DT_JMPREL, 0))
11837 if (!add_dynamic_entry (DT_REL, 0)
11838 || !add_dynamic_entry (DT_RELSZ, 0)
11839 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
11844 if (!add_dynamic_entry (DT_RELA, 0)
11845 || !add_dynamic_entry (DT_RELASZ, 0)
11846 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
11851 /* If any dynamic relocs apply to a read-only section,
11852 then we need a DT_TEXTREL entry. */
11853 if ((info->flags & DF_TEXTREL) == 0)
11854 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
11857 if ((info->flags & DF_TEXTREL) != 0)
11859 if (!add_dynamic_entry (DT_TEXTREL, 0))
11862 if (htab->vxworks_p
11863 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
11866 #undef add_dynamic_entry
11871 /* Finish up dynamic symbol handling. We set the contents of various
11872 dynamic sections here. */
11875 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
11876 struct bfd_link_info * info,
11877 struct elf_link_hash_entry * h,
11878 Elf_Internal_Sym * sym)
11881 struct elf32_arm_link_hash_table *htab;
11882 struct elf32_arm_link_hash_entry *eh;
11884 dynobj = elf_hash_table (info)->dynobj;
11885 htab = elf32_arm_hash_table (info);
11886 eh = (struct elf32_arm_link_hash_entry *) h;
11888 if (h->plt.offset != (bfd_vma) -1)
11894 Elf_Internal_Rela rel;
11896 /* This symbol has an entry in the procedure linkage table. Set it up. */
11899 BFD_ASSERT (h->dynindx != -1);
11901 splt = bfd_get_section_by_name (dynobj, ".plt");
11902 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
11903 BFD_ASSERT (splt != NULL && srel != NULL);
11905 /* Fill in the entry in the procedure linkage table. */
11906 if (htab->symbian_p)
11908 put_arm_insn (htab, output_bfd,
11909 elf32_arm_symbian_plt_entry[0],
11910 splt->contents + h->plt.offset);
11911 bfd_put_32 (output_bfd,
11912 elf32_arm_symbian_plt_entry[1],
11913 splt->contents + h->plt.offset + 4);
11915 /* Fill in the entry in the .rel.plt section. */
11916 rel.r_offset = (splt->output_section->vma
11917 + splt->output_offset
11918 + h->plt.offset + 4);
11919 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11921 /* Get the index in the procedure linkage table which
11922 corresponds to this symbol. This is the index of this symbol
11923 in all the symbols for which we are making plt entries. The
11924 first entry in the procedure linkage table is reserved. */
11925 plt_index = ((h->plt.offset - htab->plt_header_size)
11926 / htab->plt_entry_size);
11930 bfd_vma got_offset, got_address, plt_address;
11931 bfd_vma got_displacement;
11935 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
11936 BFD_ASSERT (sgot != NULL);
11938 /* Get the offset into the .got.plt table of the entry that
11939 corresponds to this function. */
11940 got_offset = eh->plt_got_offset;
11942 /* Get the index in the procedure linkage table which
11943 corresponds to this symbol. This is the index of this symbol
11944 in all the symbols for which we are making plt entries. The
11945 first three entries in .got.plt are reserved; after that
11946 symbols appear in the same order as in .plt. */
11947 plt_index = (got_offset - 12) / 4;
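/* For example, the first allocated .got.plt slot sits at offset 12, just
   past the three reserved words, and so gives plt_index 0; offset 16 gives
   plt_index 1, and so on. */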
11949 /* Calculate the address of the GOT entry. */
11950 got_address = (sgot->output_section->vma
11951 + sgot->output_offset
11954 /* ...and the address of the PLT entry. */
11955 plt_address = (splt->output_section->vma
11956 + splt->output_offset
11959 ptr = htab->splt->contents + h->plt.offset;
11960 if (htab->vxworks_p && info->shared)
11965 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
11967 val = elf32_arm_vxworks_shared_plt_entry[i];
11969 val |= got_address - sgot->output_section->vma;
11971 val |= plt_index * RELOC_SIZE (htab);
11972 if (i == 2 || i == 5)
11973 bfd_put_32 (output_bfd, val, ptr);
11975 put_arm_insn (htab, output_bfd, val, ptr);
11978 else if (htab->vxworks_p)
11983 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
11985 val = elf32_arm_vxworks_exec_plt_entry[i];
11987 val |= got_address;
11989 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
11991 val |= plt_index * RELOC_SIZE (htab);
11992 if (i == 2 || i == 5)
11993 bfd_put_32 (output_bfd, val, ptr);
11995 put_arm_insn (htab, output_bfd, val, ptr);
11998 loc = (htab->srelplt2->contents
11999 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12001 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12002 referencing the GOT for this PLT entry. */
12003 rel.r_offset = plt_address + 8;
12004 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12005 rel.r_addend = got_offset;
12006 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12007 loc += RELOC_SIZE (htab);
12009 /* Create the R_ARM_ABS32 relocation referencing the
12010 beginning of the PLT for this GOT entry. */
12011 rel.r_offset = got_address;
12012 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12014 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12018 bfd_signed_vma thumb_refs;
12019 /* Calculate the displacement between the PLT slot and the
12020 entry in the GOT. The eight-byte offset accounts for the
12021 value produced by adding to pc in the first instruction
12022 of the PLT stub. */
12023 got_displacement = got_address - (plt_address + 8);
12025 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
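/* The assertion holds because the three instructions written below can only
   encode a 28-bit displacement: bits 27-20, 19-12 and 11-0 of
   got_displacement are folded into the first, second and third instruction
   of the entry respectively (roughly: add ip, pc, #hi; add ip, ip, #mid;
   ldr pc, [ip, #lo]! for the usual three-word PLT entry). */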
12027 thumb_refs = eh->plt_thumb_refcount;
12028 if (!htab->use_blx)
12029 thumb_refs += eh->plt_maybe_thumb_refcount;
12031 if (thumb_refs > 0)
12033 put_thumb_insn (htab, output_bfd,
12034 elf32_arm_plt_thumb_stub[0], ptr - 4);
12035 put_thumb_insn (htab, output_bfd,
12036 elf32_arm_plt_thumb_stub[1], ptr - 2);
12039 put_arm_insn (htab, output_bfd,
12040 elf32_arm_plt_entry[0]
12041 | ((got_displacement & 0x0ff00000) >> 20),
12043 put_arm_insn (htab, output_bfd,
12044 elf32_arm_plt_entry[1]
12045 | ((got_displacement & 0x000ff000) >> 12),
12047 put_arm_insn (htab, output_bfd,
12048 elf32_arm_plt_entry[2]
12049 | (got_displacement & 0x00000fff),
12051 #ifdef FOUR_WORD_PLT
12052 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12056 /* Fill in the entry in the global offset table. */
12057 bfd_put_32 (output_bfd,
12058 (splt->output_section->vma
12059 + splt->output_offset),
12060 sgot->contents + got_offset);
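/* In the usual lazy-binding scheme the slot initially points at the start
   of .plt, so the first call through this entry falls back to the PLT
   header and hence the resolver; the dynamic linker overwrites the slot
   with the real symbol address once it has been resolved. */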
12062 /* Fill in the entry in the .rel(a).plt section. */
12064 rel.r_offset = got_address;
12065 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12068 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12069 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12071 if (!h->def_regular)
12073 /* Mark the symbol as undefined, rather than as defined in
12074 the .plt section. Leave the value alone. */
12075 sym->st_shndx = SHN_UNDEF;
12076 /* If the symbol is weak, we do need to clear the value.
12077 Otherwise, the PLT entry would provide a definition for
12078 the symbol even if the symbol wasn't defined anywhere,
12079 and so the symbol would never be NULL. */
12080 if (!h->ref_regular_nonweak)
12085 if (h->got.offset != (bfd_vma) -1
12086 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12087 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12091 Elf_Internal_Rela rel;
12095 /* This symbol has an entry in the global offset table. Set it up. */
12097 sgot = bfd_get_section_by_name (dynobj, ".got");
12098 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12099 BFD_ASSERT (sgot != NULL && srel != NULL);
12101 offset = (h->got.offset & ~(bfd_vma) 1);
12103 rel.r_offset = (sgot->output_section->vma
12104 + sgot->output_offset
12107 /* If this is a static link, or it is a -Bsymbolic link and the
12108 symbol is defined locally or was forced to be local because
12109 of a version file, we just want to emit a RELATIVE reloc.
12110 The entry in the global offset table will already have been
12111 initialized in the relocate_section function. */
12113 && SYMBOL_REFERENCES_LOCAL (info, h))
12115 BFD_ASSERT ((h->got.offset & 1) != 0);
12116 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12117 if (!htab->use_rel)
12119 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12120 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12125 BFD_ASSERT ((h->got.offset & 1) == 0);
12126 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12127 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12130 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12131 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12137 Elf_Internal_Rela rel;
12140 /* This symbol needs a copy reloc. Set it up. */
12141 BFD_ASSERT (h->dynindx != -1
12142 && (h->root.type == bfd_link_hash_defined
12143 || h->root.type == bfd_link_hash_defweak));
12145 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12146 RELOC_SECTION (htab, ".bss"));
12147 BFD_ASSERT (s != NULL);
12150 rel.r_offset = (h->root.u.def.value
12151 + h->root.u.def.section->output_section->vma
12152 + h->root.u.def.section->output_offset);
12153 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12154 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12155 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12158 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12159 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12160 to the ".got" section. */
12161 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12162 || (!htab->vxworks_p && h == htab->root.hgot))
12163 sym->st_shndx = SHN_ABS;
12168 /* Finish up the dynamic sections. */
12171 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12177 dynobj = elf_hash_table (info)->dynobj;
12179 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12180 BFD_ASSERT (elf32_arm_hash_table (info)->symbian_p || sgot != NULL);
12181 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12183 if (elf_hash_table (info)->dynamic_sections_created)
12186 Elf32_External_Dyn *dyncon, *dynconend;
12187 struct elf32_arm_link_hash_table *htab;
12189 htab = elf32_arm_hash_table (info);
12190 splt = bfd_get_section_by_name (dynobj, ".plt");
12191 BFD_ASSERT (splt != NULL && sdyn != NULL);
12193 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12194 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12196 for (; dyncon < dynconend; dyncon++)
12198 Elf_Internal_Dyn dyn;
12202 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12209 if (htab->vxworks_p
12210 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12211 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12216 goto get_vma_if_bpabi;
12219 goto get_vma_if_bpabi;
12222 goto get_vma_if_bpabi;
12224 name = ".gnu.version";
12225 goto get_vma_if_bpabi;
12227 name = ".gnu.version_d";
12228 goto get_vma_if_bpabi;
12230 name = ".gnu.version_r";
12231 goto get_vma_if_bpabi;
12237 name = RELOC_SECTION (htab, ".plt");
12239 s = bfd_get_section_by_name (output_bfd, name);
12240 BFD_ASSERT (s != NULL);
12241 if (!htab->symbian_p)
12242 dyn.d_un.d_ptr = s->vma;
12244 /* In the BPABI, tags in the PT_DYNAMIC section point
12245 at the file offset, not the memory address, for the
12246 convenience of the post linker. */
12247 dyn.d_un.d_ptr = s->filepos;
12248 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12252 if (htab->symbian_p)
12257 s = bfd_get_section_by_name (output_bfd,
12258 RELOC_SECTION (htab, ".plt"));
12259 BFD_ASSERT (s != NULL);
12260 dyn.d_un.d_val = s->size;
12261 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12266 if (!htab->symbian_p)
12268 /* My reading of the SVR4 ABI indicates that the
12269 procedure linkage table relocs (DT_JMPREL) should be
12270 included in the overall relocs (DT_REL). This is
12271 what Solaris does. However, UnixWare cannot handle
12272 that case. Therefore, we override the DT_RELSZ entry
12273 here to make it not include the JMPREL relocs. Since
12274 the linker script arranges for .rel(a).plt to follow all
12275 other relocation sections, we don't have to worry
12276 about changing the DT_REL entry. */
12277 s = bfd_get_section_by_name (output_bfd,
12278 RELOC_SECTION (htab, ".plt"));
12280 dyn.d_un.d_val -= s->size;
12281 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12284 /* Fall through. */
12288 /* In the BPABI, the DT_REL tag must point at the file
12289 offset, not the VMA, of the first relocation
12290 section. So, we use code similar to that in
12291 elflink.c, but do not check for SHF_ALLOC on the
12292 relocation section, since relocation sections are
12293 never allocated under the BPABI. The comments above
12294 about UnixWare notwithstanding, we include all of the
12295 relocations here. */
12296 if (htab->symbian_p)
12299 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12300 ? SHT_REL : SHT_RELA);
12301 dyn.d_un.d_val = 0;
12302 for (i = 1; i < elf_numsections (output_bfd); i++)
12304 Elf_Internal_Shdr *hdr
12305 = elf_elfsections (output_bfd)[i];
12306 if (hdr->sh_type == type)
12308 if (dyn.d_tag == DT_RELSZ
12309 || dyn.d_tag == DT_RELASZ)
12310 dyn.d_un.d_val += hdr->sh_size;
12311 else if ((ufile_ptr) hdr->sh_offset
12312 <= dyn.d_un.d_val - 1)
12313 dyn.d_un.d_val = hdr->sh_offset;
12316 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12320 /* Set the bottom bit of DT_INIT/FINI if the
12321 corresponding function is Thumb. */
12323 name = info->init_function;
12326 name = info->fini_function;
12328 /* If it wasn't set by elf_bfd_final_link
12329 then there is nothing to adjust. */
12330 if (dyn.d_un.d_val != 0)
12332 struct elf_link_hash_entry * eh;
12334 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12335 FALSE, FALSE, TRUE);
12337 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12339 dyn.d_un.d_val |= 1;
12340 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12347 /* Fill in the first entry in the procedure linkage table. */
12348 if (splt->size > 0 && elf32_arm_hash_table (info)->plt_header_size)
12350 const bfd_vma *plt0_entry;
12351 bfd_vma got_address, plt_address, got_displacement;
12353 /* Calculate the addresses of the GOT and PLT. */
12354 got_address = sgot->output_section->vma + sgot->output_offset;
12355 plt_address = splt->output_section->vma + splt->output_offset;
12357 if (htab->vxworks_p)
12359 /* The VxWorks GOT is relocated by the dynamic linker.
12360 Therefore, we must emit relocations rather than simply
12361 computing the values now. */
12362 Elf_Internal_Rela rel;
12364 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12365 put_arm_insn (htab, output_bfd, plt0_entry[0],
12366 splt->contents + 0);
12367 put_arm_insn (htab, output_bfd, plt0_entry[1],
12368 splt->contents + 4);
12369 put_arm_insn (htab, output_bfd, plt0_entry[2],
12370 splt->contents + 8);
12371 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12373 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12374 rel.r_offset = plt_address + 12;
12375 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12377 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12378 htab->srelplt2->contents);
12382 got_displacement = got_address - (plt_address + 16);
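/* In the standard PLT0 sequence the third instruction adds this
   displacement to the pc, which at that point reads as plt_address + 8 + 8;
   subtracting 16 here therefore makes the sum come out as the address of
   the GOT. */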
12384 plt0_entry = elf32_arm_plt0_entry;
12385 put_arm_insn (htab, output_bfd, plt0_entry[0],
12386 splt->contents + 0);
12387 put_arm_insn (htab, output_bfd, plt0_entry[1],
12388 splt->contents + 4);
12389 put_arm_insn (htab, output_bfd, plt0_entry[2],
12390 splt->contents + 8);
12391 put_arm_insn (htab, output_bfd, plt0_entry[3],
12392 splt->contents + 12);
12394 #ifdef FOUR_WORD_PLT
12395 /* The displacement value goes in the otherwise-unused
12396 last word of the second entry. */
12397 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12399 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12404 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12405 really seem like the right value. */
12406 if (splt->output_section->owner == output_bfd)
12407 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12409 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12411 /* Correct the .rel(a).plt.unloaded relocations. They will have
12412 incorrect symbol indexes. */
12416 num_plts = ((htab->splt->size - htab->plt_header_size)
12417 / htab->plt_entry_size);
12418 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12420 for (; num_plts; num_plts--)
12422 Elf_Internal_Rela rel;
12424 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12425 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12426 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12427 p += RELOC_SIZE (htab);
12429 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12430 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12431 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12432 p += RELOC_SIZE (htab);
12437 /* Fill in the first three entries in the global offset table. */
12440 if (sgot->size > 0)
12443 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12445 bfd_put_32 (output_bfd,
12446 sdyn->output_section->vma + sdyn->output_offset,
12448 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12449 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12452 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12459 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12461 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12462 struct elf32_arm_link_hash_table *globals;
12464 i_ehdrp = elf_elfheader (abfd);
12466 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12467 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12469 i_ehdrp->e_ident[EI_OSABI] = 0;
12470 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12474 globals = elf32_arm_hash_table (link_info);
12475 if (globals->byteswap_code)
12476 i_ehdrp->e_flags |= EF_ARM_BE8;
12480 static enum elf_reloc_type_class
12481 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12483 switch ((int) ELF32_R_TYPE (rela->r_info))
12485 case R_ARM_RELATIVE:
12486 return reloc_class_relative;
12487 case R_ARM_JUMP_SLOT:
12488 return reloc_class_plt;
12490 return reloc_class_copy;
12492 return reloc_class_normal;
12496 /* Treat SHT_NOTE sections as link-once sections, discarding duplicates that have identical contents. */
12499 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12501 if (hdr->sh_type == SHT_NOTE)
12502 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12508 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12510 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12513 /* Return TRUE if this is an unwinding table entry. */
12516 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12518 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12519 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12523 /* Set the type and flags for an ARM section. We do this by
12524 the section name, which is a hack, but ought to work. */
12527 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12531 name = bfd_get_section_name (abfd, sec);
12533 if (is_arm_elf_unwind_section_name (abfd, name))
12535 hdr->sh_type = SHT_ARM_EXIDX;
12536 hdr->sh_flags |= SHF_LINK_ORDER;
12541 /* Handle an ARM specific section when reading an object file. This is
12542 called when bfd_section_from_shdr finds a section with an unknown type. */
12546 elf32_arm_section_from_shdr (bfd *abfd,
12547 Elf_Internal_Shdr * hdr,
12551 /* There ought to be a place to keep ELF backend specific flags, but
12552 at the moment there isn't one. We just keep track of the
12553 sections by their name, instead. Fortunately, the ABI gives
12554 names for all the ARM specific sections, so we will probably get away with it. */
12556 switch (hdr->sh_type)
12558 case SHT_ARM_EXIDX:
12559 case SHT_ARM_PREEMPTMAP:
12560 case SHT_ARM_ATTRIBUTES:
12567 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12573 /* A structure used to record a list of sections, independently
12574 of the next and prev fields in the asection structure. */
12575 typedef struct section_list
12578 struct section_list * next;
12579 struct section_list * prev;
12583 /* Unfortunately we need to keep a list of sections for which
12584 an _arm_elf_section_data structure has been allocated. This
12585 is because it is possible for functions like elf32_arm_write_section
12586 to be called on a section which has had an elf_section_data structure
12587 allocated for it (and so the used_by_bfd field is valid) but
12588 for which the ARM extended version of this structure - the
12589 _arm_elf_section_data structure - has not been allocated. */
12590 static section_list * sections_with_arm_elf_section_data = NULL;
12593 record_section_with_arm_elf_section_data (asection * sec)
12595 struct section_list * entry;
12597 entry = (struct section_list *) bfd_malloc (sizeof (* entry));
12601 entry->next = sections_with_arm_elf_section_data;
12602 entry->prev = NULL;
12603 if (entry->next != NULL)
12604 entry->next->prev = entry;
12605 sections_with_arm_elf_section_data = entry;
12608 static struct section_list *
12609 find_arm_elf_section_entry (asection * sec)
12611 struct section_list * entry;
12612 static struct section_list * last_entry = NULL;
12614 /* This is a short cut for the typical case where the sections are added
12615 to the sections_with_arm_elf_section_data list in forward order and
12616 then looked up here in backwards order. This makes a real difference
12617 to the ld-srec/sec64k.exp linker test. */
12618 entry = sections_with_arm_elf_section_data;
12619 if (last_entry != NULL)
12621 if (last_entry->sec == sec)
12622 entry = last_entry;
12623 else if (last_entry->next != NULL
12624 && last_entry->next->sec == sec)
12625 entry = last_entry->next;
12628 for (; entry; entry = entry->next)
12629 if (entry->sec == sec)
12633 /* Record the entry prior to this one - it is the entry we are most
12634 likely to want to locate next time. Also this way if we have been
12635 called from unrecord_section_with_arm_elf_section_data() we will not
12636 be caching a pointer that is about to be freed. */
12637 last_entry = entry->prev;
12642 static _arm_elf_section_data *
12643 get_arm_elf_section_data (asection * sec)
12645 struct section_list * entry;
12647 entry = find_arm_elf_section_entry (sec);
12650 return elf32_arm_section_data (entry->sec);
12656 unrecord_section_with_arm_elf_section_data (asection * sec)
12658 struct section_list * entry;
12660 entry = find_arm_elf_section_entry (sec);
12664 if (entry->prev != NULL)
12665 entry->prev->next = entry->next;
12666 if (entry->next != NULL)
12667 entry->next->prev = entry->prev;
12668 if (entry == sections_with_arm_elf_section_data)
12669 sections_with_arm_elf_section_data = entry->next;
12678 struct bfd_link_info *info;
12681 int (*func) (void *, const char *, Elf_Internal_Sym *,
12682 asection *, struct elf_link_hash_entry *);
12683 } output_arch_syminfo;
12685 enum map_symbol_type
12693 /* Output a single mapping symbol. */
12696 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12697 enum map_symbol_type type,
12700 static const char *names[3] = {"$a", "$t", "$d"};
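/* $a, $t and $d are the mapping symbols defined by the ARM ELF ABI: they
   mark the start of ARM code, Thumb code and literal data respectively,
   which is what the byte-swapping code in elf32_arm_write_section relies
   on. */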
12701 struct elf32_arm_link_hash_table *htab;
12702 Elf_Internal_Sym sym;
12704 htab = elf32_arm_hash_table (osi->info);
12705 sym.st_value = osi->sec->output_section->vma
12706 + osi->sec->output_offset
12710 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12711 sym.st_shndx = osi->sec_shndx;
12712 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12716 /* Output mapping symbols for PLT entries associated with H. */
12719 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12721 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12722 struct elf32_arm_link_hash_table *htab;
12723 struct elf32_arm_link_hash_entry *eh;
12726 htab = elf32_arm_hash_table (osi->info);
12728 if (h->root.type == bfd_link_hash_indirect)
12731 if (h->root.type == bfd_link_hash_warning)
12732 /* When warning symbols are created, they **replace** the "real"
12733 entry in the hash table, thus we never get to see the real
12734 symbol in a hash traversal. So look at it now. */
12735 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12737 if (h->plt.offset == (bfd_vma) -1)
12740 eh = (struct elf32_arm_link_hash_entry *) h;
12741 addr = h->plt.offset;
12742 if (htab->symbian_p)
12744 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12746 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12749 else if (htab->vxworks_p)
12751 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12753 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12755 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12757 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12762 bfd_signed_vma thumb_refs;
12764 thumb_refs = eh->plt_thumb_refcount;
12765 if (!htab->use_blx)
12766 thumb_refs += eh->plt_maybe_thumb_refcount;
12768 if (thumb_refs > 0)
12770 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12773 #ifdef FOUR_WORD_PLT
12774 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12776 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12779 /* A three-word PLT with no Thumb thunk contains only ARM code, so we
12780 only need to output a mapping symbol for the first PLT entry (the one at
12781 offset 20, just after the PLT header's trailing $d symbol) and for entries with Thumb thunks. */
12782 if (thumb_refs > 0 || addr == 20)
12784 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12793 /* Output a single local symbol for a generated stub. */
12796 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12797 bfd_vma offset, bfd_vma size)
12799 struct elf32_arm_link_hash_table *htab;
12800 Elf_Internal_Sym sym;
12802 htab = elf32_arm_hash_table (osi->info);
12803 sym.st_value = osi->sec->output_section->vma
12804 + osi->sec->output_offset
12806 sym.st_size = size;
12808 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12809 sym.st_shndx = osi->sec_shndx;
12810 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12814 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12817 struct elf32_arm_stub_hash_entry *stub_entry;
12818 struct bfd_link_info *info;
12819 struct elf32_arm_link_hash_table *htab;
12820 asection *stub_sec;
12823 output_arch_syminfo *osi;
12824 const insn_sequence *template_sequence;
12825 enum stub_insn_type prev_type;
12828 enum map_symbol_type sym_type;
12830 /* Massage our args to the form they really have. */
12831 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12832 osi = (output_arch_syminfo *) in_arg;
12836 htab = elf32_arm_hash_table (info);
12837 stub_sec = stub_entry->stub_sec;
12839 /* Ensure this stub is attached to the current section being processed. */
12841 if (stub_sec != osi->sec)
12844 addr = (bfd_vma) stub_entry->stub_offset;
12845 stub_name = stub_entry->output_name;
12847 template_sequence = stub_entry->stub_template;
12848 switch (template_sequence[0].type)
12851 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12856 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12857 stub_entry->stub_size))
12865 prev_type = DATA_TYPE;
12867 for (i = 0; i < stub_entry->stub_template_size; i++)
12869 switch (template_sequence[i].type)
12872 sym_type = ARM_MAP_ARM;
12877 sym_type = ARM_MAP_THUMB;
12881 sym_type = ARM_MAP_DATA;
12889 if (template_sequence[i].type != prev_type)
12891 prev_type = template_sequence[i].type;
12892 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12896 switch (template_sequence[i].type)
12920 /* Output mapping symbols for linker generated sections. */
12923 elf32_arm_output_arch_local_syms (bfd *output_bfd,
12924 struct bfd_link_info *info,
12926 int (*func) (void *, const char *,
12927 Elf_Internal_Sym *,
12929 struct elf_link_hash_entry *))
12931 output_arch_syminfo osi;
12932 struct elf32_arm_link_hash_table *htab;
12934 bfd_size_type size;
12936 htab = elf32_arm_hash_table (info);
12937 check_use_blx (htab);
12943 /* ARM->Thumb glue. */
12944 if (htab->arm_glue_size > 0)
12946 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12947 ARM2THUMB_GLUE_SECTION_NAME);
12949 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12950 (output_bfd, osi.sec->output_section);
12951 if (info->shared || htab->root.is_relocatable_executable
12952 || htab->pic_veneer)
12953 size = ARM2THUMB_PIC_GLUE_SIZE;
12954 else if (htab->use_blx)
12955 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
12957 size = ARM2THUMB_STATIC_GLUE_SIZE;
12959 for (offset = 0; offset < htab->arm_glue_size; offset += size)
12961 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
12962 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
12966 /* Thumb->ARM glue. */
12967 if (htab->thumb_glue_size > 0)
12969 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12970 THUMB2ARM_GLUE_SECTION_NAME);
12972 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12973 (output_bfd, osi.sec->output_section);
12974 size = THUMB2ARM_GLUE_SIZE;
12976 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
12978 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
12979 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
12983 /* ARMv4 BX veneers. */
12984 if (htab->bx_glue_size > 0)
12986 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12987 ARM_BX_GLUE_SECTION_NAME);
12989 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12990 (output_bfd, osi.sec->output_section);
12992 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
12995 /* Long-call stubs. */
12996 if (htab->stub_bfd && htab->stub_bfd->sections)
12998 asection* stub_sec;
13000 for (stub_sec = htab->stub_bfd->sections;
13002 stub_sec = stub_sec->next)
13004 /* Ignore non-stub sections. */
13005 if (!strstr (stub_sec->name, STUB_SUFFIX))
13008 osi.sec = stub_sec;
13010 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13011 (output_bfd, osi.sec->output_section);
13013 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13017 /* Finally, output mapping symbols for the PLT. */
13018 if (!htab->splt || htab->splt->size == 0)
13021 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13022 htab->splt->output_section);
13023 osi.sec = htab->splt;
13024 /* Output mapping symbols for the plt header. SymbianOS does not have a plt header. */
13026 if (htab->vxworks_p)
13028 /* VxWorks shared libraries have no PLT header. */
13031 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13033 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13037 else if (!htab->symbian_p)
13039 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13041 #ifndef FOUR_WORD_PLT
13042 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13047 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13051 /* Allocate target specific section data. */
13054 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13056 if (!sec->used_by_bfd)
13058 _arm_elf_section_data *sdata;
13059 bfd_size_type amt = sizeof (*sdata);
13061 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13064 sec->used_by_bfd = sdata;
13067 record_section_with_arm_elf_section_data (sec);
13069 return _bfd_elf_new_section_hook (abfd, sec);
13073 /* Used to order a list of mapping symbols by address. */
13076 elf32_arm_compare_mapping (const void * a, const void * b)
13078 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13079 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13081 if (amap->vma > bmap->vma)
13083 else if (amap->vma < bmap->vma)
13085 else if (amap->type > bmap->type)
13086 /* Ensure results do not depend on the host qsort for objects with
13087 multiple mapping symbols at the same address by sorting on type after vma. */
13090 else if (amap->type < bmap->type)
13096 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
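/* For example, offset_prel31 (0x7ffffffe, 0x4) yields 0x00000002: the
   31-bit offset field wraps around while bit 31 is left untouched. */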
13098 static unsigned long
13099 offset_prel31 (unsigned long addr, bfd_vma offset)
13101 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13104 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31 offsets. */
13108 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13110 unsigned long first_word = bfd_get_32 (output_bfd, from);
13111 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13113 /* High bit of first word is supposed to be zero. */
13114 if ((first_word & 0x80000000ul) == 0)
13115 first_word = offset_prel31 (first_word, offset);
13117 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
13118 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
13119 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13120 second_word = offset_prel31 (second_word, offset);
13122 bfd_put_32 (output_bfd, first_word, to);
13123 bfd_put_32 (output_bfd, second_word, to + 4);
13126 /* Data for make_branch_to_a8_stub(). */
13128 struct a8_branch_to_stub_data {
13129 asection *writing_section;
13130 bfd_byte *contents;
13134 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13135 places for a particular section. */
13138 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13141 struct elf32_arm_stub_hash_entry *stub_entry;
13142 struct a8_branch_to_stub_data *data;
13143 bfd_byte *contents;
13144 unsigned long branch_insn;
13145 bfd_vma veneered_insn_loc, veneer_entry_loc;
13146 bfd_signed_vma branch_offset;
13148 unsigned int index;
13150 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13151 data = (struct a8_branch_to_stub_data *) in_arg;
13153 if (stub_entry->target_section != data->writing_section
13154 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13157 contents = data->contents;
13159 veneered_insn_loc = stub_entry->target_section->output_section->vma
13160 + stub_entry->target_section->output_offset
13161 + stub_entry->target_value;
13163 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13164 + stub_entry->stub_sec->output_offset
13165 + stub_entry->stub_offset;
13167 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13168 veneered_insn_loc &= ~3u;
13170 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
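/* The extra 4 accounts for the Thumb pc, which reads as the address of the
   branch plus 4; for BLX the base was additionally word-aligned above, as
   that encoding requires. */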
13172 abfd = stub_entry->target_section->owner;
13173 index = stub_entry->target_value;
13175 /* We attempt to avoid this condition by setting stubs_always_after_branch
13176 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13177 This check is just to be on the safe side... */
13178 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13180 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13181 "allocated in unsafe location"), abfd);
13185 switch (stub_entry->stub_type)
13187 case arm_stub_a8_veneer_b:
13188 case arm_stub_a8_veneer_b_cond:
13189 branch_insn = 0xf0009000;
13192 case arm_stub_a8_veneer_blx:
13193 branch_insn = 0xf000e800;
13196 case arm_stub_a8_veneer_bl:
13198 unsigned int i1, j1, i2, j2, s;
13200 branch_insn = 0xf000d000;
13203 if (branch_offset < -16777216 || branch_offset > 16777214)
13205 /* There's not much we can do apart from complain if this happens. */
13207 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13208 "of range (input file too large)"), abfd);
13212 /* i1 = not(j1 eor s), so:
13214 j1 = (not i1) eor s. */
13216 branch_insn |= (branch_offset >> 1) & 0x7ff;
13217 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13218 i2 = (branch_offset >> 22) & 1;
13219 i1 = (branch_offset >> 23) & 1;
13220 s = (branch_offset >> 24) & 1;
j1 = (!i1) ^ s;
j2 = (!i2) ^ s;
13223 branch_insn |= j2 << 11;
13224 branch_insn |= j1 << 13;
13225 branch_insn |= s << 26;
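/* branch_insn now holds a 32-bit Thumb-2 branch laid out as above: imm11 in
   bits 0-10, J2 in bit 11, J1 in bit 13, imm10 in bits 16-25 and S in bit
   26; the two halfwords are stored high halfword first below. */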
13234 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
13235 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
13240 /* Do code byteswapping. Return FALSE afterwards so that the section is
13241 written out as normal. */
13244 elf32_arm_write_section (bfd *output_bfd,
13245 struct bfd_link_info *link_info,
13247 bfd_byte *contents)
13249 unsigned int mapcount, errcount;
13250 _arm_elf_section_data *arm_data;
13251 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13252 elf32_arm_section_map *map;
13253 elf32_vfp11_erratum_list *errnode;
13256 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13260 /* If this section has not been allocated an _arm_elf_section_data
13261 structure then we cannot record anything. */
13262 arm_data = get_arm_elf_section_data (sec);
13263 if (arm_data == NULL)
13266 mapcount = arm_data->mapcount;
13267 map = arm_data->map;
13268 errcount = arm_data->erratumcount;
13272 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13274 for (errnode = arm_data->erratumlist; errnode != 0;
13275 errnode = errnode->next)
13277 bfd_vma index = errnode->vma - offset;
13279 switch (errnode->type)
13281 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13283 bfd_vma branch_to_veneer;
13284 /* Original condition code of instruction, plus bit mask for
13285 ARM B instruction. */
13286 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13289 /* The instruction is before the label. */
13292 /* Above offset included in -4 below. */
13293 branch_to_veneer = errnode->u.b.veneer->vma
13294 - errnode->vma - 4;
13296 if ((signed) branch_to_veneer < -(1 << 25)
13297 || (signed) branch_to_veneer >= (1 << 25))
13298 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13299 "range"), output_bfd);
13301 insn |= (branch_to_veneer >> 2) & 0xffffff;
13302 contents[endianflip ^ index] = insn & 0xff;
13303 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13304 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13305 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13309 case VFP11_ERRATUM_ARM_VENEER:
13311 bfd_vma branch_from_veneer;
13314 /* Take size of veneer into account. */
13315 branch_from_veneer = errnode->u.v.branch->vma
13316 - errnode->vma - 12;
13318 if ((signed) branch_from_veneer < -(1 << 25)
13319 || (signed) branch_from_veneer >= (1 << 25))
13320 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13321 "range"), output_bfd);
13323 /* Original instruction. */
13324 insn = errnode->u.v.branch->u.b.vfp_insn;
13325 contents[endianflip ^ index] = insn & 0xff;
13326 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13327 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13328 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13330 /* Branch back to insn after original insn. */
13331 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13332 contents[endianflip ^ (index + 4)] = insn & 0xff;
13333 contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff;
13334 contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff;
13335 contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff;
13345 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13347 arm_unwind_table_edit *edit_node
13348 = arm_data->u.exidx.unwind_edit_list;
13349 /* Now, sec->size is the size of the section we will write. The original
13350 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13351 markers) was sec->rawsize. (This isn't the case if we performed no
13352 edits; then rawsize will be zero and we should use size.) */
13353 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
13354 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13355 unsigned int in_index, out_index;
13356 bfd_vma add_to_offsets = 0;
13358 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13362 unsigned int edit_index = edit_node->index;
13364 if (in_index < edit_index && in_index * 8 < input_size)
13366 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13367 contents + in_index * 8, add_to_offsets);
13371 else if (in_index == edit_index
13372 || (in_index * 8 >= input_size
13373 && edit_index == UINT_MAX))
13375 switch (edit_node->type)
13377 case DELETE_EXIDX_ENTRY:
13379 add_to_offsets += 8;
13382 case INSERT_EXIDX_CANTUNWIND_AT_END:
13384 asection *text_sec = edit_node->linked_section;
13385 bfd_vma text_offset = text_sec->output_section->vma
13386 + text_sec->output_offset
13388 bfd_vma exidx_offset = offset + out_index * 8;
13389 unsigned long prel31_offset;
13391 /* Note: this is meant to be equivalent to an
13392 R_ARM_PREL31 relocation. These synthetic
13393 EXIDX_CANTUNWIND markers are not relocated by the
13394 usual BFD method. */
13395 prel31_offset = (text_offset - exidx_offset)
13398 /* First address we can't unwind. */
13399 bfd_put_32 (output_bfd, prel31_offset,
13400 &edited_contents[out_index * 8]);
13402 /* Code for EXIDX_CANTUNWIND. */
13403 bfd_put_32 (output_bfd, 0x1,
13404 &edited_contents[out_index * 8 + 4]);
13407 add_to_offsets -= 8;
13412 edit_node = edit_node->next;
13417 /* No more edits, copy remaining entries verbatim. */
13418 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13419 contents + in_index * 8, add_to_offsets);
13425 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13426 bfd_set_section_contents (output_bfd, sec->output_section,
13428 (file_ptr) sec->output_offset, sec->size);
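/* For reference, each .ARM.exidx entry is two 32-bit words: a prel31
   offset (bit 31 clear) from the entry to the function it describes,
   followed by either an inline unwind description (bit 31 set), a
   prel31 offset to an .ARM.extab entry, or the special value 1
   (EXIDX_CANTUNWIND).  A minimal sketch of how the synthetic marker
   above is formed, using a hypothetical helper that is not part of
   BFD:

     write_exidx_cantunwind (bfd *obfd, bfd_byte *where,
                             bfd_vma text_addr, bfd_vma entry_addr)
     {
       /* Prel31 offset to the first address we cannot unwind,
          equivalent to a hand-resolved R_ARM_PREL31.  */
       bfd_put_32 (obfd, (text_addr - entry_addr) & 0x7fffffff, where);
       /* Code for EXIDX_CANTUNWIND.  */
       bfd_put_32 (obfd, 0x1, where + 4);
     }
   */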
13433 /* Fix code to point to Cortex-A8 erratum stubs. */
13434 if (globals->fix_cortex_a8)
13436 struct a8_branch_to_stub_data data;
13438 data.writing_section = sec;
13439 data.contents = contents;
13441 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
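/* In a BE8 image data stays big-endian but instructions are fetched
   little-endian, so when code byteswapping is requested the regions
   identified by the mapping symbols are byte-reversed below: four
   bytes at a time for ARM ($a) code, two at a time for Thumb ($t)
   code, while data ($d) regions are left untouched.  */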
13448 if (globals->byteswap_code)
13450 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13453 for (i = 0; i < mapcount; i++)
13455 if (i == mapcount - 1)
13458 end = map[i + 1].vma;
13460 switch (map[i].type)
13463 /* Byte swap code words. */
13464 while (ptr + 3 < end)
13466 tmp = contents[ptr];
13467 contents[ptr] = contents[ptr + 3];
13468 contents[ptr + 3] = tmp;
13469 tmp = contents[ptr + 1];
13470 contents[ptr + 1] = contents[ptr + 2];
13471 contents[ptr + 2] = tmp;
13477 /* Byte swap code halfwords. */
13478 while (ptr + 1 < end)
13480 tmp = contents[ptr];
13481 contents[ptr] = contents[ptr + 1];
13482 contents[ptr + 1] = tmp;
13488 /* Leave data alone. */
13496 arm_data->mapcount = 0;
13497 arm_data->mapsize = 0;
13498 arm_data->map = NULL;
13499 unrecord_section_with_arm_elf_section_data (sec);
13505 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13507 void * ignore ATTRIBUTE_UNUSED)
13509 unrecord_section_with_arm_elf_section_data (sec);
13513 elf32_arm_close_and_cleanup (bfd * abfd)
13515 if (abfd->sections)
13516 bfd_map_over_sections (abfd,
13517 unrecord_section_via_map_over_sections,
13520 return _bfd_elf_close_and_cleanup (abfd);
13524 elf32_arm_bfd_free_cached_info (bfd * abfd)
13526 if (abfd->sections)
13527 bfd_map_over_sections (abfd,
13528 unrecord_section_via_map_over_sections,
13531 return _bfd_free_cached_info (abfd);
13534 /* Display STT_ARM_TFUNC symbols as functions. */
13537 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13540 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13542 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13543 elfsym->symbol.flags |= BSF_FUNCTION;
13547 /* Mangle thumb function symbols as we read them in. */
13550 elf32_arm_swap_symbol_in (bfd * abfd,
13553 Elf_Internal_Sym *dst)
13555 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13558 /* New EABI objects mark thumb function symbols by setting the low bit of
13559 the address. Turn these into STT_ARM_TFUNC. */
13560 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13561 && (dst->st_value & 1))
13563 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13564 dst->st_value &= ~(bfd_vma) 1;
13570 /* Mangle thumb function symbols as we write them out. */
13573 elf32_arm_swap_symbol_out (bfd *abfd,
13574 const Elf_Internal_Sym *src,
13578 Elf_Internal_Sym newsym;
13580 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13581 of the address set, as per the new EABI. We do this unconditionally
13582 because objcopy does not set the elf header flags until after
13583 it writes out the symbol table. */
13584 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13587 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13588 if (newsym.st_shndx != SHN_UNDEF)
13590 /* Do this only for defined symbols. At link time, the static
13591 linker will simulate the work of the dynamic linker by resolving
13592 symbols and will carry over the thumbness of the symbols it finds
13593 to the output symbol table. It's not clear how this happens, but
13594 the thumbness of undefined symbols may well be different at
13595 runtime, and writing '1' for them would be confusing for users
13596 and possibly for the dynamic linker itself.
13598 newsym.st_value |= 1;
13603 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
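/* To summarise the two symbol hooks above, the transformations for a
   Thumb function symbol are (sketch, defined symbols only):

     on disk (EABI view)            internal (BFD view)
     STT_FUNC, st_value | 1   -->   STT_ARM_TFUNC, st_value & ~1   (in)
     STT_ARM_TFUNC            -->   STT_FUNC, st_value | 1         (out)
   */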
13606 /* Add the PT_ARM_EXIDX program header. */
13609 elf32_arm_modify_segment_map (bfd *abfd,
13610 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13612 struct elf_segment_map *m;
13615 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13616 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13618 /* If there is already a PT_ARM_EXIDX header, then we do not
13619 want to add another one. This situation arises when running
13620 "strip"; the input binary already has the header. */
13621 m = elf_tdata (abfd)->segment_map;
13622 while (m && m->p_type != PT_ARM_EXIDX)
13626 m = (struct elf_segment_map *)
13627 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13630 m->p_type = PT_ARM_EXIDX;
13632 m->sections[0] = sec;
13634 m->next = elf_tdata (abfd)->segment_map;
13635 elf_tdata (abfd)->segment_map = m;
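/* The PT_ARM_EXIDX segment simply covers the .ARM.exidx section so
   that the run-time unwinder can locate the exception index table
   from the program headers alone.  */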
13642 /* We may add a PT_ARM_EXIDX program header. */
13645 elf32_arm_additional_program_headers (bfd *abfd,
13646 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13650 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13651 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13657 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13660 elf32_arm_is_function_type (unsigned int type)
13662 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13665 /* We use this to override swap_symbol_in and swap_symbol_out. */
13666 const struct elf_size_info elf32_arm_size_info =
13668 sizeof (Elf32_External_Ehdr),
13669 sizeof (Elf32_External_Phdr),
13670 sizeof (Elf32_External_Shdr),
13671 sizeof (Elf32_External_Rel),
13672 sizeof (Elf32_External_Rela),
13673 sizeof (Elf32_External_Sym),
13674 sizeof (Elf32_External_Dyn),
13675 sizeof (Elf_External_Note),
13679 ELFCLASS32, EV_CURRENT,
13680 bfd_elf32_write_out_phdrs,
13681 bfd_elf32_write_shdrs_and_ehdr,
13682 bfd_elf32_checksum_contents,
13683 bfd_elf32_write_relocs,
13684 elf32_arm_swap_symbol_in,
13685 elf32_arm_swap_symbol_out,
13686 bfd_elf32_slurp_reloc_table,
13687 bfd_elf32_slurp_symbol_table,
13688 bfd_elf32_swap_dyn_in,
13689 bfd_elf32_swap_dyn_out,
13690 bfd_elf32_swap_reloc_in,
13691 bfd_elf32_swap_reloc_out,
13692 bfd_elf32_swap_reloca_in,
13693 bfd_elf32_swap_reloca_out
13696 #define ELF_ARCH bfd_arch_arm
13697 #define ELF_MACHINE_CODE EM_ARM
13698 #ifdef __QNXTARGET__
13699 #define ELF_MAXPAGESIZE 0x1000
13701 #define ELF_MAXPAGESIZE 0x8000
13703 #define ELF_MINPAGESIZE 0x1000
13704 #define ELF_COMMONPAGESIZE 0x1000
13706 #define bfd_elf32_mkobject elf32_arm_mkobject
13708 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13709 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13710 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13711 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13712 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13713 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13714 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13715 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13716 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13717 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13718 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13719 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13720 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13721 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13722 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13724 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13725 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13726 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13727 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13728 #define elf_backend_check_relocs elf32_arm_check_relocs
13729 #define elf_backend_relocate_section elf32_arm_relocate_section
13730 #define elf_backend_write_section elf32_arm_write_section
13731 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13732 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13733 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13734 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13735 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13736 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13737 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13738 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13739 #define elf_backend_object_p elf32_arm_object_p
13740 #define elf_backend_section_flags elf32_arm_section_flags
13741 #define elf_backend_fake_sections elf32_arm_fake_sections
13742 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13743 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13744 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13745 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13746 #define elf_backend_size_info elf32_arm_size_info
13747 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13748 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13749 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13750 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13751 #define elf_backend_is_function_type elf32_arm_is_function_type
13753 #define elf_backend_can_refcount 1
13754 #define elf_backend_can_gc_sections 1
13755 #define elf_backend_plt_readonly 1
13756 #define elf_backend_want_got_plt 1
13757 #define elf_backend_want_plt_sym 0
13758 #define elf_backend_may_use_rel_p 1
13759 #define elf_backend_may_use_rela_p 0
13760 #define elf_backend_default_use_rela_p 0
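/* The default ARM targets use REL relocations, with addends stored in
   the section contents; the VxWorks variant below overrides these
   settings to use RELA instead.  */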
13762 #define elf_backend_got_header_size 12
13764 #undef elf_backend_obj_attrs_vendor
13765 #define elf_backend_obj_attrs_vendor "aeabi"
13766 #undef elf_backend_obj_attrs_section
13767 #define elf_backend_obj_attrs_section ".ARM.attributes"
13768 #undef elf_backend_obj_attrs_arg_type
13769 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13770 #undef elf_backend_obj_attrs_section_type
13771 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13772 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13774 #include "elf32-target.h"
13776 /* VxWorks Targets. */
13778 #undef TARGET_LITTLE_SYM
13779 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13780 #undef TARGET_LITTLE_NAME
13781 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13782 #undef TARGET_BIG_SYM
13783 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13784 #undef TARGET_BIG_NAME
13785 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13787 /* Like elf32_arm_link_hash_table_create -- but overrides
13788 appropriately for VxWorks. */
13790 static struct bfd_link_hash_table *
13791 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13793 struct bfd_link_hash_table *ret;
13795 ret = elf32_arm_link_hash_table_create (abfd);
13798 struct elf32_arm_link_hash_table *htab
13799 = (struct elf32_arm_link_hash_table *) ret;
13801 htab->vxworks_p = 1;
13807 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13809 elf32_arm_final_write_processing (abfd, linker);
13810 elf_vxworks_final_write_processing (abfd, linker);
13814 #define elf32_bed elf32_arm_vxworks_bed
13816 #undef bfd_elf32_bfd_link_hash_table_create
13817 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13818 #undef elf_backend_add_symbol_hook
13819 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13820 #undef elf_backend_final_write_processing
13821 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13822 #undef elf_backend_emit_relocs
13823 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
13825 #undef elf_backend_may_use_rel_p
13826 #define elf_backend_may_use_rel_p 0
13827 #undef elf_backend_may_use_rela_p
13828 #define elf_backend_may_use_rela_p 1
13829 #undef elf_backend_default_use_rela_p
13830 #define elf_backend_default_use_rela_p 1
13831 #undef elf_backend_want_plt_sym
13832 #define elf_backend_want_plt_sym 1
13833 #undef ELF_MAXPAGESIZE
13834 #define ELF_MAXPAGESIZE 0x1000
13836 #include "elf32-target.h"
13839 /* Merge backend specific data from an object file to the output
13840 object file when linking. */
13843 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
13845 flagword out_flags;
13847 bfd_boolean flags_compatible = TRUE;
13850 /* Check if we have the same endianness. */
13851 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
13854 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13857 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
13860 /* The input BFD must have had its flags initialised. */
13861 /* The following seems bogus to me -- the flags are initialized in
13862 the assembler but I don't think an elf_flags_init field is
13863 written into the object. */
13864 /* BFD_ASSERT (elf_flags_init (ibfd)); */
13866 in_flags = elf_elfheader (ibfd)->e_flags;
13867 out_flags = elf_elfheader (obfd)->e_flags;
13869 /* In theory there is no reason why we couldn't handle this. However
13870 in practice it isn't even close to working and there is no real
13871 reason to want it. */
13872 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
13873 && !(ibfd->flags & DYNAMIC)
13874 && (in_flags & EF_ARM_BE8))
13876 _bfd_error_handler (_("error: %B is already in final BE8 format"),
13881 if (!elf_flags_init (obfd))
13883 /* If the input is the default architecture and had the default
13884 flags then do not bother setting the flags for the output
13885 architecture, instead allow future merges to do this. If no
13886 future merges ever set these flags then they will retain their
13887 uninitialised values which, surprise surprise, correspond
13888 to the default values. */
13889 if (bfd_get_arch_info (ibfd)->the_default
13890 && elf_elfheader (ibfd)->e_flags == 0)
13893 elf_flags_init (obfd) = TRUE;
13894 elf_elfheader (obfd)->e_flags = in_flags;
13896 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
13897 && bfd_get_arch_info (obfd)->the_default)
13898 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
13903 /* Determine what should happen if the input ARM architecture
13904 does not match the output ARM architecture. */
13905 if (! bfd_arm_merge_machines (ibfd, obfd))
13908 /* Identical flags must be compatible. */
13909 if (in_flags == out_flags)
13912 /* Check to see if the input BFD actually contains any sections. If
13913 not, its flags may not have been initialised either, but it
13914 cannot actually cause any incompatibility. Do not short-circuit
13915 dynamic objects; their section list may be emptied by
13916 elf_link_add_object_symbols.
13918 Also check to see if there are no code sections in the input.
13919 In this case there is no need to check for code-specific flags.
13920 XXX - do we need to worry about floating-point format compatibility
13921 in data sections? */
13922 if (!(ibfd->flags & DYNAMIC))
13924 bfd_boolean null_input_bfd = TRUE;
13925 bfd_boolean only_data_sections = TRUE;
13927 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
13929 /* Ignore synthetic glue sections. */
13930 if (strcmp (sec->name, ".glue_7")
13931 && strcmp (sec->name, ".glue_7t"))
13933 if ((bfd_get_section_flags (ibfd, sec)
13934 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
13935 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
13936 only_data_sections = FALSE;
13938 null_input_bfd = FALSE;
13943 if (null_input_bfd || only_data_sections)
13947 /* Complain about various flag mismatches. */
13948 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
13949 EF_ARM_EABI_VERSION (out_flags)))
13952 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
13954 (in_flags & EF_ARM_EABIMASK) >> 24,
13955 (out_flags & EF_ARM_EABIMASK) >> 24);
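/* EF_ARM_EABIMASK covers the top byte of e_flags, so the ">> 24"
   above extracts the EABI version number carried in the ELF header.  */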
13959 /* Not sure what needs to be checked for EABI versions >= 1. */
13960 /* VxWorks libraries do not use these flags. */
13961 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
13962 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
13963 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
13965 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13968 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
13970 in_flags & EF_ARM_APCS_26 ? 26 : 32,
13971 out_flags & EF_ARM_APCS_26 ? 26 : 32);
13972 flags_compatible = FALSE;
13975 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13977 if (in_flags & EF_ARM_APCS_FLOAT)
13979 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
13983 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
13986 flags_compatible = FALSE;
13989 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
13991 if (in_flags & EF_ARM_VFP_FLOAT)
13993 (_("error: %B uses VFP instructions, whereas %B does not"),
13997 (_("error: %B uses FPA instructions, whereas %B does not"),
14000 flags_compatible = FALSE;
14003 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
14005 if (in_flags & EF_ARM_MAVERICK_FLOAT)
14007 (_("error: %B uses Maverick instructions, whereas %B does not"),
14011 (_("error: %B does not use Maverick instructions, whereas %B does"),
14014 flags_compatible = FALSE;
14017 #ifdef EF_ARM_SOFT_FLOAT
14018 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
14020 /* We can allow interworking between code that is VFP format
14021 layout, and uses either soft float or integer regs for
14022 passing floating point arguments and results. We already
14023 know that the APCS_FLOAT flags match; similarly for VFP flags. */
14025 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
14026 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
14028 if (in_flags & EF_ARM_SOFT_FLOAT)
14030 (_("error: %B uses software FP, whereas %B uses hardware FP"),
14034 (_("error: %B uses hardware FP, whereas %B uses software FP"),
14037 flags_compatible = FALSE;
14042 /* Interworking mismatch is only a warning. */
14043 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14045 if (in_flags & EF_ARM_INTERWORK)
14048 (_("Warning: %B supports interworking, whereas %B does not"),
14054 (_("Warning: %B does not support interworking, whereas %B does"),
14060 return flags_compatible;
14064 /* Symbian OS Targets. */
14066 #undef TARGET_LITTLE_SYM
14067 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
14068 #undef TARGET_LITTLE_NAME
14069 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
14070 #undef TARGET_BIG_SYM
14071 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
14072 #undef TARGET_BIG_NAME
14073 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
14075 /* Like elf32_arm_link_hash_table_create -- but overrides
14076 appropriately for Symbian OS. */
14078 static struct bfd_link_hash_table *
14079 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
14081 struct bfd_link_hash_table *ret;
14083 ret = elf32_arm_link_hash_table_create (abfd);
14086 struct elf32_arm_link_hash_table *htab
14087 = (struct elf32_arm_link_hash_table *)ret;
14088 /* There is no PLT header for Symbian OS. */
14089 htab->plt_header_size = 0;
14090 /* The PLT entries are each one instruction and one word. */
14091 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
14092 htab->symbian_p = 1;
14093 /* Symbian uses armv5t or above, so use_blx is always true. */
14095 htab->root.is_relocatable_executable = 1;
14100 static const struct bfd_elf_special_section
14101 elf32_arm_symbian_special_sections[] =
14103 /* In a BPABI executable, the dynamic linking sections do not go in
14104 the loadable read-only segment. The post-linker may wish to
14105 refer to these sections, but they are not part of the final program image. */
14107 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
14108 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
14109 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
14110 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
14111 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
14112 /* These sections do not need to be writable as the SymbianOS
14113 postlinker will arrange things so that no dynamic relocation is applied to them. */
14115 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
14116 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
14117 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
14118 { NULL, 0, 0, 0, 0 }
14122 elf32_arm_symbian_begin_write_processing (bfd *abfd,
14123 struct bfd_link_info *link_info)
14125 /* BPABI objects are never loaded directly by an OS kernel; they are
14126 processed by a postlinker first, into an OS-specific format. If
14127 the D_PAGED bit is set on the file, BFD will align segments on
14128 page boundaries, so that an OS can directly map the file. With
14129 BPABI objects, that just results in wasted space. In addition,
14130 because we clear the D_PAGED bit, map_sections_to_segments will
14131 recognize that the program headers should not be mapped into any
14132 loadable segment. */
14133 abfd->flags &= ~D_PAGED;
14134 elf32_arm_begin_write_processing (abfd, link_info);
14138 elf32_arm_symbian_modify_segment_map (bfd *abfd,
14139 struct bfd_link_info *info)
14141 struct elf_segment_map *m;
14144 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14145 segment. However, because the .dynamic section is not marked
14146 with SEC_LOAD, the generic ELF code will not create such a segment. */
14148 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14151 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14152 if (m->p_type == PT_DYNAMIC)
14157 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14158 m->next = elf_tdata (abfd)->segment_map;
14159 elf_tdata (abfd)->segment_map = m;
14163 /* Also call the generic arm routine. */
14164 return elf32_arm_modify_segment_map (abfd, info);
14167 /* Return the address of the Ith PLT stub in section PLT, for relocation
14168 REL, or (bfd_vma) -1 if it should not be included. */
14171 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14172 const arelent *rel ATTRIBUTE_UNUSED)
14174 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
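/* With the two-word Symbian PLT entry described above (one instruction
   plus one address word) this evaluates to plt->vma + 8 * i; e.g. the
   third stub (i == 2) starts 16 bytes into .plt.  */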
14179 #define elf32_bed elf32_arm_symbian_bed
14181 /* The dynamic sections are not allocated on SymbianOS; the postlinker
14182 will process them and then discard them. */
14183 #undef ELF_DYNAMIC_SEC_FLAGS
14184 #define ELF_DYNAMIC_SEC_FLAGS \
14185 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14187 #undef elf_backend_add_symbol_hook
14188 #undef elf_backend_emit_relocs
14190 #undef bfd_elf32_bfd_link_hash_table_create
14191 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14192 #undef elf_backend_special_sections
14193 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14194 #undef elf_backend_begin_write_processing
14195 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14196 #undef elf_backend_final_write_processing
14197 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14199 #undef elf_backend_modify_segment_map
14200 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14202 /* There is no .got section for BPABI objects, and hence no header. */
14203 #undef elf_backend_got_header_size
14204 #define elf_backend_got_header_size 0
14206 /* Similarly, there is no .got.plt section. */
14207 #undef elf_backend_want_got_plt
14208 #define elf_backend_want_got_plt 0
14210 #undef elf_backend_plt_sym_val
14211 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14213 #undef elf_backend_may_use_rel_p
14214 #define elf_backend_may_use_rel_p 1
14215 #undef elf_backend_may_use_rela_p
14216 #define elf_backend_may_use_rela_p 0
14217 #undef elf_backend_default_use_rela_p
14218 #define elf_backend_default_use_rela_p 0
14219 #undef elf_backend_want_plt_sym
14220 #define elf_backend_want_plt_sym 0
14221 #undef ELF_MAXPAGESIZE
14222 #define ELF_MAXPAGESIZE 0x8000
14224 #include "elf32-target.h"