1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
26 #include "libiberty.h"
29 #include "elf-vxworks.h"
/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_table.  */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
/* Return the size of a relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
39 #define RELOC_SIZE(HTAB) \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
/* Return the function to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
46 #define SWAP_RELOC_IN(HTAB) \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
/* Return the function to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
53 #define SWAP_RELOC_OUT(HTAB) \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
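
/* Illustrative sketch (not from the original source): assuming a
   hypothetical local `htab' of type struct elf32_arm_link_hash_table *,
   the macros above select between the REL and RELA conventions in one
   place, e.g.

     asection *srel = bfd_get_section_by_name (dynobj,
						RELOC_SECTION (htab, ".text"));
     bfd_byte *loc = srel->contents + srel->reloc_count * RELOC_SIZE (htab);
     SWAP_RELOC_OUT (htab) (output_bfd, &outrel, loc);

   where `dynobj', `outrel' and `output_bfd' are assumed to be in scope.  */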
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info,
/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */
73 static reloc_howto_type elf32_arm_howto_table_1[] =
76 HOWTO (R_ARM_NONE, /* type */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
80 FALSE, /* pc_relative */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
88 FALSE), /* pcrel_offset */
90 HOWTO (R_ARM_PC24, /* type */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
94 TRUE, /* pc_relative */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
109 FALSE, /* pc_relative */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
124 TRUE, /* pc_relative */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
  /* PC relative LDR offset (R_ARM_PC13 in the old ABI) - R_ARM_LDR_PC_G0 in AAELF.  */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
139 TRUE, /* pc_relative */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
154 FALSE, /* pc_relative */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
169 FALSE, /* pc_relative */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
179 HOWTO (R_ARM_THM_ABS5, /* type */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
183 FALSE, /* pc_relative */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
194 HOWTO (R_ARM_ABS8, /* type */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
198 FALSE, /* pc_relative */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
208 HOWTO (R_ARM_SBREL32, /* type */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
212 FALSE, /* pc_relative */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
222 HOWTO (R_ARM_THM_CALL, /* type */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
226 TRUE, /* pc_relative */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
236 HOWTO (R_ARM_THM_PC8, /* type */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
240 TRUE, /* pc_relative */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
250 HOWTO (R_ARM_BREL_ADJ, /* type */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
254 FALSE, /* pc_relative */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
264 HOWTO (R_ARM_SWI24, /* type */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
268 FALSE, /* pc_relative */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
278 HOWTO (R_ARM_THM_SWI8, /* type */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
282 FALSE, /* pc_relative */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
	 "R_ARM_THM_SWI8",	/* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
297 TRUE, /* pc_relative */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
312 TRUE, /* pc_relative */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
322 /* Dynamic TLS relocations. */
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
328 FALSE, /* pc_relative */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
342 FALSE, /* pc_relative */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
356 FALSE, /* pc_relative */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
366 /* Relocs used in ARM Linux */
368 HOWTO (R_ARM_COPY, /* type */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
372 FALSE, /* pc_relative */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
382 HOWTO (R_ARM_GLOB_DAT, /* type */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
386 FALSE, /* pc_relative */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
400 FALSE, /* pc_relative */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
410 HOWTO (R_ARM_RELATIVE, /* type */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
414 FALSE, /* pc_relative */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
424 HOWTO (R_ARM_GOTOFF32, /* type */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
428 FALSE, /* pc_relative */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
438 HOWTO (R_ARM_GOTPC, /* type */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
442 TRUE, /* pc_relative */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
452 HOWTO (R_ARM_GOT32, /* type */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
456 FALSE, /* pc_relative */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
466 HOWTO (R_ARM_PLT32, /* type */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
470 TRUE, /* pc_relative */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
480 HOWTO (R_ARM_CALL, /* type */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
484 TRUE, /* pc_relative */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
494 HOWTO (R_ARM_JUMP24, /* type */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
498 TRUE, /* pc_relative */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
508 HOWTO (R_ARM_THM_JUMP24, /* type */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
512 TRUE, /* pc_relative */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
522 HOWTO (R_ARM_BASE_ABS, /* type */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
526 FALSE, /* pc_relative */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
540 TRUE, /* pc_relative */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
554 TRUE, /* pc_relative */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
568 TRUE, /* pc_relative */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
582 FALSE, /* pc_relative */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
596 FALSE, /* pc_relative */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
610 FALSE, /* pc_relative */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
620 HOWTO (R_ARM_TARGET1, /* type */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
624 FALSE, /* pc_relative */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
634 HOWTO (R_ARM_ROSEGREL32, /* type */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
638 FALSE, /* pc_relative */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
648 HOWTO (R_ARM_V4BX, /* type */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
652 FALSE, /* pc_relative */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
662 HOWTO (R_ARM_TARGET2, /* type */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
666 FALSE, /* pc_relative */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
676 HOWTO (R_ARM_PREL31, /* type */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
680 TRUE, /* pc_relative */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
694 FALSE, /* pc_relative */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
704 HOWTO (R_ARM_MOVT_ABS, /* type */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
708 FALSE, /* pc_relative */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
722 TRUE, /* pc_relative */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
732 HOWTO (R_ARM_MOVT_PREL, /* type */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
736 TRUE, /* pc_relative */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
750 FALSE, /* pc_relative */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
764 FALSE, /* pc_relative */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
778 TRUE, /* pc_relative */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
792 TRUE, /* pc_relative */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
802 HOWTO (R_ARM_THM_JUMP19, /* type */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
806 TRUE, /* pc_relative */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
816 HOWTO (R_ARM_THM_JUMP6, /* type */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
820 TRUE, /* pc_relative */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
830 /* These are declared as 13-bit signed relocations because we can
   address -4095 .. 4095 (base) by altering ADDW to SUBW or vice versa.  */
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
837 TRUE, /* pc_relative */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
847 HOWTO (R_ARM_THM_PC12, /* type */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
851 TRUE, /* pc_relative */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
861 HOWTO (R_ARM_ABS32_NOI, /* type */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
865 FALSE, /* pc_relative */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
875 HOWTO (R_ARM_REL32_NOI, /* type */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
879 TRUE, /* pc_relative */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
889 /* Group relocations. */
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
895 TRUE, /* pc_relative */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
909 TRUE, /* pc_relative */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
923 TRUE, /* pc_relative */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
937 TRUE, /* pc_relative */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
951 TRUE, /* pc_relative */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
965 TRUE, /* pc_relative */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
979 TRUE, /* pc_relative */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
993 TRUE, /* pc_relative */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1007 TRUE, /* pc_relative */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1021 TRUE, /* pc_relative */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1035 TRUE, /* pc_relative */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1049 TRUE, /* pc_relative */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1063 TRUE, /* pc_relative */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1077 TRUE, /* pc_relative */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1091 TRUE, /* pc_relative */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1105 TRUE, /* pc_relative */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1119 TRUE, /* pc_relative */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1133 TRUE, /* pc_relative */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1147 TRUE, /* pc_relative */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1161 TRUE, /* pc_relative */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1175 TRUE, /* pc_relative */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1189 TRUE, /* pc_relative */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1203 TRUE, /* pc_relative */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1217 TRUE, /* pc_relative */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1231 TRUE, /* pc_relative */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1245 TRUE, /* pc_relative */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1259 TRUE, /* pc_relative */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1269 /* End of group relocations. */
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1275 FALSE, /* pc_relative */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1289 FALSE, /* pc_relative */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1303 FALSE, /* pc_relative */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1317 FALSE, /* pc_relative */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1331 FALSE, /* pc_relative */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1345 FALSE, /* pc_relative */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1355 EMPTY_HOWTO (90), /* Unallocated. */
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 FALSE, /* pc_relative */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 FALSE, /* pc_relative */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1392 TRUE, /* pc_relative */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1406 FALSE, /* pc_relative */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1420 FALSE, /* pc_relative */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1437 FALSE, /* pc_relative */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1445 FALSE), /* pcrel_offset */
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1452 FALSE, /* pc_relative */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1460 FALSE), /* pcrel_offset */
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1466 TRUE, /* pc_relative */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1480 TRUE, /* pc_relative */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1495 FALSE, /* pc_relative */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1509 FALSE, /* pc_relative */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1523 FALSE, /* pc_relative */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1537 FALSE, /* pc_relative */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1551 FALSE, /* pc_relative */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1565 FALSE, /* pc_relative */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1579 FALSE, /* pc_relative */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1593 FALSE, /* pc_relative */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1604 /* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1608 249-255 extended, currently unused, relocations: */
1610 static reloc_howto_type elf32_arm_howto_table_2[4] =
1612 HOWTO (R_ARM_RREL32, /* type */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1616 FALSE, /* pc_relative */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1624 FALSE), /* pcrel_offset */
1626 HOWTO (R_ARM_RABS32, /* type */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1630 FALSE, /* pc_relative */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1638 FALSE), /* pcrel_offset */
1640 HOWTO (R_ARM_RPC24, /* type */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1644 FALSE, /* pc_relative */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1652 FALSE), /* pcrel_offset */
1654 HOWTO (R_ARM_RBASE, /* type */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1658 FALSE, /* pc_relative */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
1666 FALSE) /* pcrel_offset */
1669 static reloc_howto_type *
1670 elf32_arm_howto_from_type (unsigned int r_type)
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1677 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1683 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1684 Elf_Internal_Rela * elf_reloc)
1686 unsigned int r_type;
1688 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1689 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1692 struct elf32_arm_reloc_map
1694 bfd_reloc_code_real_type bfd_reloc_val;
1695 unsigned char elf_reloc_val;
/* All entries in this list must also be present in elf32_arm_howto_table_1.  */
1699 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1725 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1726 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1727 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1728 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1729 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1730 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1731 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1732 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1733 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1734 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1735 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1736 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1737 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1738 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1739 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1740 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1741 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1742 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1743 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1744 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1745 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1746 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1747 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1748 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1749 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1750 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1751 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1752 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1753 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1754 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1755 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1756 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1757 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1758 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1759 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1760 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1761 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1762 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1763 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1764 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1765 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1766 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1767 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1768 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1769 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1770 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1771 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1772 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1773 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1774 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1775 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1776 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1777 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1778 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1779 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1782 static reloc_howto_type *
1783 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1784 bfd_reloc_code_real_type code)
1788 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1789 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1790 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1795 static reloc_howto_type *
1796 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1801 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1802 if (elf32_arm_howto_table_1[i].name != NULL
1803 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1804 return &elf32_arm_howto_table_1[i];
1806 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1807 if (elf32_arm_howto_table_2[i].name != NULL
1808 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1809 return &elf32_arm_howto_table_2[i];
1814 /* Support for core dump NOTE sections. */
1817 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1822 switch (note->descsz)
1827 case 148: /* Linux/ARM 32-bit. */
	/* pr_cursig */
	elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
	/* pr_pid */
	elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
1841 /* Make a ".reg/999" section. */
1842 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1843 size, note->descpos + offset);
1847 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1849 switch (note->descsz)
1854 case 124: /* Linux/ARM elf_prpsinfo. */
	/* pr_fname */
	elf_tdata (abfd)->core_program
	  = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
	/* pr_psargs */
	elf_tdata (abfd)->core_command
	  = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
  /* Note that for some reason, some implementations (at least one,
     anyway) tack a spurious space onto the end of the args, so strip
     it off if it exists.  */
1865 char *command = elf_tdata (abfd)->core_command;
1866 int n = strlen (command);
1868 if (0 < n && command[n - 1] == ' ')
1869 command[n - 1] = '\0';
1875 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1876 #define TARGET_LITTLE_NAME "elf32-littlearm"
1877 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1878 #define TARGET_BIG_NAME "elf32-bigarm"
1880 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1881 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1883 typedef unsigned long int insn32;
1884 typedef unsigned short int insn16;
/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
1888 #define INTERWORK_FLAG(abfd) \
1889 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1890 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1891 || ((abfd)->flags & BFD_LINKER_CREATED))
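
/* Usage sketch (illustrative, not from the original source): for a
   hypothetical input `ibfd', INTERWORK_FLAG collapses the three cases
   above into one test, e.g.

     bfd_boolean interwork_ok = INTERWORK_FLAG (ibfd) != 0;

   i.e. EABI v4+ objects, objects with EF_ARM_INTERWORK set in e_flags,
   and linker-created BFDs are all treated as supporting ARM/Thumb
   interworking.  */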
/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name and its type, the stub can be found.  The
   names can be changed; the only requirement is that the %s be present.  */
1897 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1898 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1900 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1901 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1903 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1904 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1906 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1907 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1909 #define STUB_ENTRY_NAME "__%s_veneer"
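
/* Name-mangling sketch (illustrative, not from the original source):
   given a hypothetical symbol name `sym_name', the templates above are
   expanded with sprintf-style formatting, e.g.

     char *tmp_name = (char *) bfd_malloc (strlen (sym_name)
					   + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
     if (tmp_name != NULL)
       sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);

   producing "__<sym_name>_from_thumb".  The %x and %d templates take an
   unsigned or int argument instead of a string.  */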
/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
1913 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1915 #ifdef FOUR_WORD_PLT
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
1921 static const bfd_vma elf32_arm_plt0_entry [] =
1923 0xe52de004, /* str lr, [sp, #-4]! */
1924 0xe59fe010, /* ldr lr, [pc, #16] */
1925 0xe08fe00e, /* add lr, pc, lr */
1926 0xe5bef008, /* ldr pc, [lr, #8]! */
/* Subsequent entries in a procedure linkage table look like
   this.  */
1931 static const bfd_vma elf32_arm_plt_entry [] =
1933 0xe28fc600, /* add ip, pc, #NN */
1934 0xe28cca00, /* add ip, ip, #NN */
1935 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1936 0x00000000, /* unused */
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
1945 static const bfd_vma elf32_arm_plt0_entry [] =
1947 0xe52de004, /* str lr, [sp, #-4]! */
1948 0xe59fe004, /* ldr lr, [pc, #4] */
1949 0xe08fe00e, /* add lr, pc, lr */
1950 0xe5bef008, /* ldr pc, [lr, #8]! */
1951 0x00000000, /* &GOT[0] - . */
/* Subsequent entries in a procedure linkage table look like
   this.  */
1956 static const bfd_vma elf32_arm_plt_entry [] =
1958 0xe28fc600, /* add ip, pc, #0xNN00000 */
1959 0xe28cca00, /* add ip, ip, #0xNN000 */
1960 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1965 /* The format of the first entry in the procedure linkage table
1966 for a VxWorks executable. */
1967 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1969 0xe52dc008, /* str ip,[sp,#-8]! */
1970 0xe59fc000, /* ldr ip,[pc] */
1971 0xe59cf008, /* ldr pc,[ip,#8] */
1972 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1975 /* The format of subsequent entries in a VxWorks executable. */
1976 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1978 0xe59fc000, /* ldr ip,[pc] */
1979 0xe59cf000, /* ldr pc,[ip] */
1980 0x00000000, /* .long @got */
1981 0xe59fc000, /* ldr ip,[pc] */
1982 0xea000000, /* b _PLT */
1983 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1986 /* The format of entries in a VxWorks shared library. */
1987 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1989 0xe59fc000, /* ldr ip,[pc] */
1990 0xe79cf009, /* ldr pc,[ip,r9] */
1991 0x00000000, /* .long @got */
1992 0xe59fc000, /* ldr ip,[pc] */
1993 0xe599f008, /* ldr pc,[r9,#8] */
1994 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1997 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1998 #define PLT_THUMB_STUB_SIZE 4
1999 static const bfd_vma elf32_arm_plt_thumb_stub [] =
/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  */
2007 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2009 0xe51ff004, /* ldr pc, [pc, #-4] */
2010 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2013 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2014 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2015 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2016 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2017 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2018 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
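/* For reference, these work out to the usual architectural branch ranges;
   the +4/+8 terms fold in the PC-relative bias of the branch instruction:

     ARM B/BL        : 24-bit word offset,     about -0x2000000+8 .. 0x1fffffc+8
     Thumb BL (v4T)  : 22-bit halfword offset, about  -0x400000+4 .. 0x3ffffe+4
     Thumb-2 B.W/BL  : 24-bit halfword offset, about -0x1000000+4 .. 0xfffffe+4

   i.e. roughly +/-32MB, +/-4MB and +/-16MB respectively.  */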
2028 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2029 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2030 is inserted in arm_build_one_stub(). */
2031 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2032 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2033 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2034 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2035 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2036 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
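/* As a concrete example of what these macros build, ARM_INSN (0xe51ff004)
   expands to { 0xe51ff004, ARM_TYPE, R_ARM_NONE, 0 }, and
   DATA_WORD (0, R_ARM_ABS32, 0) expands to { 0, DATA_TYPE, R_ARM_ABS32, 0 };
   data entries have their relocation applied when the stub is built in
   arm_build_one_stub.  */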
2041 enum stub_insn_type type;
2042 unsigned int r_type;
2046 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2047 to reach the stub if necessary. */
2048 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2050 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2051 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2054 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not available. */
2056 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2058 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2059 ARM_INSN(0xe12fff1c), /* bx ip */
2060 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2063 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2064 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2066 THUMB16_INSN(0xb401), /* push {r0} */
2067 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2068 THUMB16_INSN(0x4684), /* mov ip, r0 */
2069 THUMB16_INSN(0xbc01), /* pop {r0} */
2070 THUMB16_INSN(0x4760), /* bx ip */
2071 THUMB16_INSN(0xbf00), /* nop */
2072 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2075 /* V4T Thumb -> Thumb long branch stub. Using the stack is not allowed. */
2077 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2079 THUMB16_INSN(0x4778), /* bx pc */
2080 THUMB16_INSN(0x46c0), /* nop */
2081 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2082 ARM_INSN(0xe12fff1c), /* bx ip */
2083 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2086 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not available. */
2088 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2090 THUMB16_INSN(0x4778), /* bx pc */
2091 THUMB16_INSN(0x46c0), /* nop */
2092 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2093 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2096 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2097 one, when the destination is close enough. */
2098 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2100 THUMB16_INSN(0x4778), /* bx pc */
2101 THUMB16_INSN(0x46c0), /* nop */
2102 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2105 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2106 blx to reach the stub if necessary. */
2107 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2109 ARM_INSN(0xe59fc000), /* ldr ip, [pc] */
2110 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2111 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2114 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2115 blx to reach the stub if necessary. We can not add into pc;
2116 it is not guaranteed to mode switch (different in ARMv6 and ARMv7). */
2118 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2120 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2121 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2122 ARM_INSN(0xe12fff1c), /* bx ip */
2123 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2126 /* V4T ARM -> Thumb long branch stub, PIC. */
2127 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2129 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2130 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2131 ARM_INSN(0xe12fff1c), /* bx ip */
2132 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2135 /* V4T Thumb -> ARM long branch stub, PIC. */
2136 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2138 THUMB16_INSN(0x4778), /* bx pc */
2139 THUMB16_INSN(0x46c0), /* nop */
2140 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2141 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2142 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2145 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile architectures. */
2147 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2149 THUMB16_INSN(0xb401), /* push {r0} */
2150 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2151 THUMB16_INSN(0x46fc), /* mov ip, pc */
2152 THUMB16_INSN(0x4484), /* add ip, r0 */
2153 THUMB16_INSN(0xbc01), /* pop {r0} */
2154 THUMB16_INSN(0x4760), /* bx ip */
2155 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X+4) */
2158 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not allowed. */
2160 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2162 THUMB16_INSN(0x4778), /* bx pc */
2163 THUMB16_INSN(0x46c0), /* nop */
2164 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2165 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2166 ARM_INSN(0xe12fff1c), /* bx ip */
2167 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2170 /* Cortex-A8 erratum-workaround stubs. */
2172 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2173 can't use a conditional branch to reach this stub). */
2175 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2177 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2178 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2179 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2182 /* Stub used for b.w and bl.w instructions. */
2184 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2186 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2189 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2191 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2194 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2195 instruction (which switches to ARM mode) to point to this stub. Jump to the
2196 real destination using an ARM-mode branch. */
2198 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2200 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2203 /* Section name for stubs is the associated section name plus this string. */
2205 #define STUB_SUFFIX ".stub"
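/* So the stub section created for a group takes the name of the section the
   stubs are attached to plus this suffix, e.g. ".text" -> ".text.stub"; the
   name is built in elf32_arm_create_or_find_stub_sec below.  */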
2207 /* One entry per long/short branch stub defined above. */
2209 DEF_STUB(long_branch_any_any) \
2210 DEF_STUB(long_branch_v4t_arm_thumb) \
2211 DEF_STUB(long_branch_thumb_only) \
2212 DEF_STUB(long_branch_v4t_thumb_thumb) \
2213 DEF_STUB(long_branch_v4t_thumb_arm) \
2214 DEF_STUB(short_branch_v4t_thumb_arm) \
2215 DEF_STUB(long_branch_any_arm_pic) \
2216 DEF_STUB(long_branch_any_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2220 DEF_STUB(long_branch_thumb_only_pic) \
2221 DEF_STUB(a8_veneer_b_cond) \
2222 DEF_STUB(a8_veneer_b) \
2223 DEF_STUB(a8_veneer_bl) \
2224 DEF_STUB(a8_veneer_blx)
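/* Sketch of how DEF_STUBS is consumed below: with
   #define DEF_STUB(x) arm_stub_##x, it contributes enumerators such as
   arm_stub_long_branch_any_any to elf32_arm_stub_type, and with
   #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
   it contributes matching stub_definitions[] entries pairing each template
   array with its length.  */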
2226 #define DEF_STUB(x) arm_stub_##x,
2227 enum elf32_arm_stub_type {
2230 /* Note the first a8_veneer type */
2231 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2237 const insn_sequence* template_sequence;
2241 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2242 static const stub_def stub_definitions[] = {
2247 struct elf32_arm_stub_hash_entry
2249 /* Base hash table entry structure. */
2250 struct bfd_hash_entry root;
2252 /* The stub section. */
2255 /* Offset within stub_sec of the beginning of this stub. */
2256 bfd_vma stub_offset;
2258 /* Given the symbol's value and its section we can determine its final
2259 value when building the stubs (so the stub knows where to jump). */
2260 bfd_vma target_value;
2261 asection *target_section;
2263 /* Offset to apply to relocation referencing target_value. */
2264 bfd_vma target_addend;
2266 /* The instruction which caused this stub to be generated (only valid for
2267 Cortex-A8 erratum workaround stubs at present). */
2268 unsigned long orig_insn;
2270 /* The stub type. */
2271 enum elf32_arm_stub_type stub_type;
2272 /* Its encoding size in bytes. */
2275 const insn_sequence *stub_template;
2276 /* The size of the template (number of entries). */
2277 int stub_template_size;
2279 /* The symbol table entry, if any, that this was derived from. */
2280 struct elf32_arm_link_hash_entry *h;
2282 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2283 unsigned char st_type;
2285 /* Where this stub is being called from, or, in the case of combined
2286 stub sections, the first input section in the group. */
2289 /* The name for the local symbol at the start of this stub. The
2290 stub name in the hash table has to be unique; this does not, so
2291 it can be friendlier. */
2295 /* Used to build a map of a section. This is required for mixed-endian code/data. */
2298 typedef struct elf32_elf_section_map
2303 elf32_arm_section_map;
2305 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2309 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2310 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2311 VFP11_ERRATUM_ARM_VENEER,
2312 VFP11_ERRATUM_THUMB_VENEER
2314 elf32_vfp11_erratum_type;
2316 typedef struct elf32_vfp11_erratum_list
2318 struct elf32_vfp11_erratum_list *next;
2324 struct elf32_vfp11_erratum_list *veneer;
2325 unsigned int vfp_insn;
2329 struct elf32_vfp11_erratum_list *branch;
2333 elf32_vfp11_erratum_type type;
2335 elf32_vfp11_erratum_list;
2340 INSERT_EXIDX_CANTUNWIND_AT_END
2342 arm_unwind_edit_type;
2344 /* A (sorted) list of edits to apply to an unwind table. */
2345 typedef struct arm_unwind_table_edit
2347 arm_unwind_edit_type type;
2348 /* Note: we sometimes want to insert an unwind entry corresponding to a
2349 section different from the one we're currently writing out, so record the
2350 (text) section this edit relates to here. */
2351 asection *linked_section;
2353 struct arm_unwind_table_edit *next;
2355 arm_unwind_table_edit;
2357 typedef struct _arm_elf_section_data
2359 /* Information about mapping symbols. */
2360 struct bfd_elf_section_data elf;
2361 unsigned int mapcount;
2362 unsigned int mapsize;
2363 elf32_arm_section_map *map;
2364 /* Information about CPU errata. */
2365 unsigned int erratumcount;
2366 elf32_vfp11_erratum_list *erratumlist;
2367 /* Information about unwind tables. */
2370 /* Unwind info attached to a text section. */
2373 asection *arm_exidx_sec;
2376 /* Unwind info attached to an .ARM.exidx section. */
2379 arm_unwind_table_edit *unwind_edit_list;
2380 arm_unwind_table_edit *unwind_edit_tail;
2384 _arm_elf_section_data;
2386 #define elf32_arm_section_data(sec) \
2387 ((_arm_elf_section_data *) elf_section_data (sec))
2389 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2390 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2391 so may be created multiple times: we use an array of these entries whilst
2392 relaxing which we can refresh easily, then create stubs for each potentially
2393 erratum-triggering instruction once we've settled on a solution. */
2395 struct a8_erratum_fix {
2400 unsigned long orig_insn;
2402 enum elf32_arm_stub_type stub_type;
2406 /* A table of relocs applied to branches which might trigger Cortex-A8 erratum. */
2409 struct a8_erratum_reloc {
2411 bfd_vma destination;
2412 struct elf32_arm_link_hash_entry *hash;
2413 const char *sym_name;
2414 unsigned int r_type;
2415 unsigned char st_type;
2416 bfd_boolean non_a8_stub;
2419 /* The size of the thread control block. */
2422 struct elf_arm_obj_tdata
2424 struct elf_obj_tdata root;
2426 /* tls_type for each local got entry. */
2427 char *local_got_tls_type;
2429 /* Zero to warn when linking objects with incompatible enum sizes. */
2430 int no_enum_size_warning;
2432 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2433 int no_wchar_size_warning;
2436 #define elf_arm_tdata(bfd) \
2437 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2439 #define elf32_arm_local_got_tls_type(bfd) \
2440 (elf_arm_tdata (bfd)->local_got_tls_type)
2442 #define is_arm_elf(bfd) \
2443 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2444 && elf_tdata (bfd) != NULL \
2445 && elf_object_id (bfd) == ARM_ELF_DATA)
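/* Typical usage of the accessors above, as a sketch only:

     if (! is_arm_elf (abfd))
       return FALSE;
     tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
*/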
2448 elf32_arm_mkobject (bfd *abfd)
2450 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2454 /* The ARM linker needs to keep track of the number of relocs that it
2455 decides to copy in check_relocs for each symbol. This is so that
2456 it can discard PC relative relocs if it doesn't need them when
2457 linking with -Bsymbolic. We store the information in a field
2458 extending the regular ELF linker hash table. */
2460 /* This structure keeps track of the number of relocs we have copied
2461 for a given symbol. */
2462 struct elf32_arm_relocs_copied
2465 struct elf32_arm_relocs_copied * next;
2466 /* A section in dynobj. */
2468 /* Number of relocs copied in this section. */
2469 bfd_size_type count;
2470 /* Number of PC-relative relocs copied in this section. */
2471 bfd_size_type pc_count;
2474 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2476 /* Arm ELF linker hash entry. */
2477 struct elf32_arm_link_hash_entry
2479 struct elf_link_hash_entry root;
2481 /* Number of PC relative relocs copied for this symbol. */
2482 struct elf32_arm_relocs_copied * relocs_copied;
2484 /* We reference count Thumb references to a PLT entry separately,
2485 so that we can emit the Thumb trampoline only if needed. */
2486 bfd_signed_vma plt_thumb_refcount;
2488 /* Some references from Thumb code may be eliminated by BL->BLX
2489 conversion, so record them separately. */
2490 bfd_signed_vma plt_maybe_thumb_refcount;
2492 /* Since PLT entries have variable size if the Thumb prologue is
2493 used, we need to record the index into .got.plt instead of
2494 recomputing it from the PLT offset. */
2495 bfd_signed_vma plt_got_offset;
2497 #define GOT_UNKNOWN 0
2498 #define GOT_NORMAL 1
2499 #define GOT_TLS_GD 2
2500 #define GOT_TLS_IE 4
2501 unsigned char tls_type;
2503 /* The symbol marking the real symbol location for exported thumb
2504 symbols with Arm stubs. */
2505 struct elf_link_hash_entry *export_glue;
2507 /* A pointer to the most recently used stub hash entry against this symbol. */
2509 struct elf32_arm_stub_hash_entry *stub_cache;
2512 /* Traverse an arm ELF linker hash table. */
2513 #define elf32_arm_link_hash_traverse(table, func, info) \
2514 (elf_link_hash_traverse \
2516 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2519 /* Get the ARM elf linker hash table from a link_info structure. */
2520 #define elf32_arm_hash_table(info) \
2521 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2522 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2524 #define arm_stub_hash_lookup(table, string, create, copy) \
2525 ((struct elf32_arm_stub_hash_entry *) \
2526 bfd_hash_lookup ((table), (string), (create), (copy)))
2528 /* Array to keep track of which stub sections have been created, and
2529 information on stub grouping. */
2532 /* This is the section to which stubs in the group will be attached. */
2535 /* The stub section. */
2539 /* ARM ELF linker hash table. */
2540 struct elf32_arm_link_hash_table
2542 /* The main hash table. */
2543 struct elf_link_hash_table root;
2545 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2546 bfd_size_type thumb_glue_size;
2548 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2549 bfd_size_type arm_glue_size;
2551 /* The size in bytes of section containing the ARMv4 BX veneers. */
2552 bfd_size_type bx_glue_size;
2554 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2555 veneer has been populated. */
2556 bfd_vma bx_glue_offset[15];
2558 /* The size in bytes of the section containing glue for VFP11 erratum veneers. */
2560 bfd_size_type vfp11_erratum_glue_size;
2562 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2563 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2564 elf32_arm_write_section(). */
2565 struct a8_erratum_fix *a8_erratum_fixes;
2566 unsigned int num_a8_erratum_fixes;
2568 /* An arbitrary input BFD chosen to hold the glue sections. */
2569 bfd * bfd_of_glue_owner;
2571 /* Nonzero to output a BE8 image. */
2574 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2575 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2578 /* The relocation to use for R_ARM_TARGET2 relocations. */
2581 /* 0 = Ignore R_ARM_V4BX.
2582 1 = Convert BX to MOV PC.
2583 2 = Generate v4 interworking stubs. */
2586 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2589 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2592 /* What sort of code sequences we should look for which may trigger the
2593 VFP11 denorm erratum. */
2594 bfd_arm_vfp11_fix vfp11_fix;
2596 /* Global counter for the number of fixes we have emitted. */
2597 int num_vfp11_fixes;
2599 /* Nonzero to force PIC branch veneers. */
2602 /* The number of bytes in the initial entry in the PLT. */
2603 bfd_size_type plt_header_size;
2605 /* The number of bytes in the subsequent PLT entries. */
2606 bfd_size_type plt_entry_size;
2608 /* True if the target system is VxWorks. */
2611 /* True if the target system is Symbian OS. */
2614 /* True if the target uses REL relocations. */
2617 /* Short-cuts to get to dynamic linker sections. */
2626 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2629 /* Data for R_ARM_TLS_LDM32 relocations. */
2632 bfd_signed_vma refcount;
2636 /* Small local sym cache. */
2637 struct sym_cache sym_cache;
2639 /* For convenience in allocate_dynrelocs. */
2642 /* The stub hash table. */
2643 struct bfd_hash_table stub_hash_table;
2645 /* Linker stub bfd. */
2648 /* Linker call-backs. */
2649 asection * (*add_stub_section) (const char *, asection *);
2650 void (*layout_sections_again) (void);
2652 /* Array to keep track of which stub sections have been created, and
2653 information on stub grouping. */
2654 struct map_stub *stub_group;
2656 /* Number of elements in stub_group. */
2659 /* Assorted information used by elf32_arm_size_stubs. */
2660 unsigned int bfd_count;
2662 asection **input_list;
2665 /* Create an entry in an ARM ELF linker hash table. */
2667 static struct bfd_hash_entry *
2668 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2669 struct bfd_hash_table * table,
2670 const char * string)
2672 struct elf32_arm_link_hash_entry * ret =
2673 (struct elf32_arm_link_hash_entry *) entry;
2675 /* Allocate the structure if it has not already been allocated by a subclass. */
2678 ret = (struct elf32_arm_link_hash_entry *)
2679 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2681 return (struct bfd_hash_entry *) ret;
2683 /* Call the allocation method of the superclass. */
2684 ret = ((struct elf32_arm_link_hash_entry *)
2685 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2689 ret->relocs_copied = NULL;
2690 ret->tls_type = GOT_UNKNOWN;
2691 ret->plt_thumb_refcount = 0;
2692 ret->plt_maybe_thumb_refcount = 0;
2693 ret->plt_got_offset = -1;
2694 ret->export_glue = NULL;
2696 ret->stub_cache = NULL;
2699 return (struct bfd_hash_entry *) ret;
2702 /* Initialize an entry in the stub hash table. */
2704 static struct bfd_hash_entry *
2705 stub_hash_newfunc (struct bfd_hash_entry *entry,
2706 struct bfd_hash_table *table,
2709 /* Allocate the structure if it has not already been allocated by a subclass. */
2713 entry = (struct bfd_hash_entry *)
2714 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2719 /* Call the allocation method of the superclass. */
2720 entry = bfd_hash_newfunc (entry, table, string);
2723 struct elf32_arm_stub_hash_entry *eh;
2725 /* Initialize the local fields. */
2726 eh = (struct elf32_arm_stub_hash_entry *) entry;
2727 eh->stub_sec = NULL;
2728 eh->stub_offset = 0;
2729 eh->target_value = 0;
2730 eh->target_section = NULL;
2731 eh->target_addend = 0;
2733 eh->stub_type = arm_stub_none;
2735 eh->stub_template = NULL;
2736 eh->stub_template_size = 0;
2739 eh->output_name = NULL;
2745 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2746 shortcuts to them in our hash table. */
2749 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2751 struct elf32_arm_link_hash_table *htab;
2753 htab = elf32_arm_hash_table (info);
2757 /* BPABI objects never have a GOT, or associated sections. */
2758 if (htab->symbian_p)
2761 if (! _bfd_elf_create_got_section (dynobj, info))
2764 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2765 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2766 if (!htab->sgot || !htab->sgotplt)
2769 htab->srelgot = bfd_get_section_by_name (dynobj,
2770 RELOC_SECTION (htab, ".got"));
2771 if (htab->srelgot == NULL)
2776 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2777 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our hash table. */
2781 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2783 struct elf32_arm_link_hash_table *htab;
2785 htab = elf32_arm_hash_table (info);
2789 if (!htab->sgot && !create_got_section (dynobj, info))
2792 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2795 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2796 htab->srelplt = bfd_get_section_by_name (dynobj,
2797 RELOC_SECTION (htab, ".plt"));
2798 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2800 htab->srelbss = bfd_get_section_by_name (dynobj,
2801 RELOC_SECTION (htab, ".bss"));
2803 if (htab->vxworks_p)
2805 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2810 htab->plt_header_size = 0;
2811 htab->plt_entry_size
2812 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2816 htab->plt_header_size
2817 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2818 htab->plt_entry_size
2819 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2826 || (!info->shared && !htab->srelbss))
2832 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2835 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2836 struct elf_link_hash_entry *dir,
2837 struct elf_link_hash_entry *ind)
2839 struct elf32_arm_link_hash_entry *edir, *eind;
2841 edir = (struct elf32_arm_link_hash_entry *) dir;
2842 eind = (struct elf32_arm_link_hash_entry *) ind;
2844 if (eind->relocs_copied != NULL)
2846 if (edir->relocs_copied != NULL)
2848 struct elf32_arm_relocs_copied **pp;
2849 struct elf32_arm_relocs_copied *p;
2851 /* Add reloc counts against the indirect sym to the direct sym
2852 list. Merge any entries against the same section. */
2853 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2855 struct elf32_arm_relocs_copied *q;
2857 for (q = edir->relocs_copied; q != NULL; q = q->next)
2858 if (q->section == p->section)
2860 q->pc_count += p->pc_count;
2861 q->count += p->count;
2868 *pp = edir->relocs_copied;
2871 edir->relocs_copied = eind->relocs_copied;
2872 eind->relocs_copied = NULL;
2875 if (ind->root.type == bfd_link_hash_indirect)
2877 /* Copy over PLT info. */
2878 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2879 eind->plt_thumb_refcount = 0;
2880 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2881 eind->plt_maybe_thumb_refcount = 0;
2883 if (dir->got.refcount <= 0)
2885 edir->tls_type = eind->tls_type;
2886 eind->tls_type = GOT_UNKNOWN;
2890 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2893 /* Create an ARM elf linker hash table. */
2895 static struct bfd_link_hash_table *
2896 elf32_arm_link_hash_table_create (bfd *abfd)
2898 struct elf32_arm_link_hash_table *ret;
2899 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2901 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2905 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2906 elf32_arm_link_hash_newfunc,
2907 sizeof (struct elf32_arm_link_hash_entry),
2915 ret->sgotplt = NULL;
2916 ret->srelgot = NULL;
2918 ret->srelplt = NULL;
2919 ret->sdynbss = NULL;
2920 ret->srelbss = NULL;
2921 ret->srelplt2 = NULL;
2922 ret->thumb_glue_size = 0;
2923 ret->arm_glue_size = 0;
2924 ret->bx_glue_size = 0;
2925 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2926 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2927 ret->vfp11_erratum_glue_size = 0;
2928 ret->num_vfp11_fixes = 0;
2929 ret->fix_cortex_a8 = 0;
2930 ret->bfd_of_glue_owner = NULL;
2931 ret->byteswap_code = 0;
2932 ret->target1_is_rel = 0;
2933 ret->target2_reloc = R_ARM_NONE;
2934 #ifdef FOUR_WORD_PLT
2935 ret->plt_header_size = 16;
2936 ret->plt_entry_size = 16;
2938 ret->plt_header_size = 20;
2939 ret->plt_entry_size = 12;
2946 ret->sym_cache.abfd = NULL;
2948 ret->tls_ldm_got.refcount = 0;
2949 ret->stub_bfd = NULL;
2950 ret->add_stub_section = NULL;
2951 ret->layout_sections_again = NULL;
2952 ret->stub_group = NULL;
2956 ret->input_list = NULL;
2958 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2959 sizeof (struct elf32_arm_stub_hash_entry)))
2965 return &ret->root.root;
2968 /* Free the derived linker hash table. */
2971 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2973 struct elf32_arm_link_hash_table *ret
2974 = (struct elf32_arm_link_hash_table *) hash;
2976 bfd_hash_table_free (&ret->stub_hash_table);
2977 _bfd_generic_link_hash_table_free (hash);
2980 /* Determine if we're dealing with a Thumb only architecture. */
2983 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2985 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2989 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
2992 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
2995 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2996 Tag_CPU_arch_profile);
2998 return profile == 'M';
3001 /* Determine if we're dealing with a Thumb-2 object. */
3004 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3006 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3008 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3011 /* Determine what kind of NOPs are available. */
3014 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3016 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3018 return arch == TAG_CPU_ARCH_V6T2
3019 || arch == TAG_CPU_ARCH_V6K
3020 || arch == TAG_CPU_ARCH_V7
3021 || arch == TAG_CPU_ARCH_V7E_M;
3025 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3027 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3029 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3030 || arch == TAG_CPU_ARCH_V7E_M);
3034 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3038 case arm_stub_long_branch_thumb_only:
3039 case arm_stub_long_branch_v4t_thumb_arm:
3040 case arm_stub_short_branch_v4t_thumb_arm:
3041 case arm_stub_long_branch_v4t_thumb_arm_pic:
3042 case arm_stub_long_branch_thumb_only_pic:
3053 /* Determine the type of stub needed, if any, for a call. */
3055 static enum elf32_arm_stub_type
3056 arm_type_of_stub (struct bfd_link_info *info,
3057 asection *input_sec,
3058 const Elf_Internal_Rela *rel,
3059 int *actual_st_type,
3060 struct elf32_arm_link_hash_entry *hash,
3061 bfd_vma destination,
3067 bfd_signed_vma branch_offset;
3068 unsigned int r_type;
3069 struct elf32_arm_link_hash_table * globals;
3072 enum elf32_arm_stub_type stub_type = arm_stub_none;
3074 int st_type = *actual_st_type;
3076 /* We don't know the actual type of destination in case it is of
3077 type STT_SECTION: give up. */
3078 if (st_type == STT_SECTION)
3081 globals = elf32_arm_hash_table (info);
3082 if (globals == NULL)
3085 thumb_only = using_thumb_only (globals);
3087 thumb2 = using_thumb2 (globals);
3089 /* Determine where the call point is. */
3090 location = (input_sec->output_offset
3091 + input_sec->output_section->vma
3094 r_type = ELF32_R_TYPE (rel->r_info);
3096 /* Keep a simpler condition, for the sake of clarity. */
3097 if (globals->splt != NULL
3099 && hash->root.plt.offset != (bfd_vma) -1)
3103 /* Note when dealing with PLT entries: the main PLT stub is in
3104 ARM mode, so if the branch is in Thumb mode, another
3105 Thumb->ARM stub will be inserted later just before the ARM
3106 PLT stub. We don't take this extra distance into account
3107 here, because if a long branch stub is needed, we'll add a
3108 Thumb->Arm one and branch directly to the ARM PLT entry
3109 because it avoids spreading offset corrections in several places. */
3112 destination = (globals->splt->output_section->vma
3113 + globals->splt->output_offset
3114 + hash->root.plt.offset);
3118 branch_offset = (bfd_signed_vma)(destination - location);
3120 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3122 /* Handle cases where:
3123 - this call goes too far (different Thumb/Thumb2 max distance)
3125 - it's a Thumb->Arm call and blx is not available, or it's a
3126 Thumb->Arm branch (not bl). A stub is needed in this case,
3127 but only if this call is not through a PLT entry. Indeed,
3128 PLT stubs handle mode switching already. */
3131 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3132 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3134 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3135 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3136 || ((st_type != STT_ARM_TFUNC)
3137 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3138 || (r_type == R_ARM_THM_JUMP24))
3141 if (st_type == STT_ARM_TFUNC)
3143 /* Thumb to thumb. */
3146 stub_type = (info->shared | globals->pic_veneer)
3148 ? ((globals->use_blx
3149 && (r_type ==R_ARM_THM_CALL))
3150 /* V5T and above. Stub starts with ARM code, so
3151 we must be able to switch mode before
3152 reaching it, which is only possible for 'bl'
3153 (ie R_ARM_THM_CALL relocation). */
3154 ? arm_stub_long_branch_any_thumb_pic
3155 /* On V4T, use Thumb code only. */
3156 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3158 /* non-PIC stubs. */
3159 : ((globals->use_blx
3160 && (r_type ==R_ARM_THM_CALL))
3161 /* V5T and above. */
3162 ? arm_stub_long_branch_any_any
3164 : arm_stub_long_branch_v4t_thumb_thumb);
3168 stub_type = (info->shared | globals->pic_veneer)
3170 ? arm_stub_long_branch_thumb_only_pic
3172 : arm_stub_long_branch_thumb_only;
3179 && sym_sec->owner != NULL
3180 && !INTERWORK_FLAG (sym_sec->owner))
3182 (*_bfd_error_handler)
3183 (_("%B(%s): warning: interworking not enabled.\n"
3184 " first occurrence: %B: Thumb call to ARM"),
3185 sym_sec->owner, input_bfd, name);
3188 stub_type = (info->shared | globals->pic_veneer)
3190 ? ((globals->use_blx
3191 && (r_type ==R_ARM_THM_CALL))
3192 /* V5T and above. */
3193 ? arm_stub_long_branch_any_arm_pic
3195 : arm_stub_long_branch_v4t_thumb_arm_pic)
3197 /* non-PIC stubs. */
3198 : ((globals->use_blx
3199 && (r_type ==R_ARM_THM_CALL))
3200 /* V5T and above. */
3201 ? arm_stub_long_branch_any_any
3203 : arm_stub_long_branch_v4t_thumb_arm);
3205 /* Handle v4t short branches. */
3206 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3207 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3208 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3209 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3213 else if (r_type == R_ARM_CALL
3214 || r_type == R_ARM_JUMP24
3215 || r_type == R_ARM_PLT32)
3217 if (st_type == STT_ARM_TFUNC)
3222 && sym_sec->owner != NULL
3223 && !INTERWORK_FLAG (sym_sec->owner))
3225 (*_bfd_error_handler)
3226 (_("%B(%s): warning: interworking not enabled.\n"
3227 " first occurrence: %B: ARM call to Thumb"),
3228 sym_sec->owner, input_bfd, name);
3231 /* We have an extra 2 bytes of reach because of
3232 the mode change (bit 24 (H) of BLX encoding). */
3233 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3234 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3235 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3236 || (r_type == R_ARM_JUMP24)
3237 || (r_type == R_ARM_PLT32))
3239 stub_type = (info->shared | globals->pic_veneer)
3241 ? ((globals->use_blx)
3242 /* V5T and above. */
3243 ? arm_stub_long_branch_any_thumb_pic
3245 : arm_stub_long_branch_v4t_arm_thumb_pic)
3247 /* non-PIC stubs. */
3248 : ((globals->use_blx)
3249 /* V5T and above. */
3250 ? arm_stub_long_branch_any_any
3252 : arm_stub_long_branch_v4t_arm_thumb);
3258 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3259 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3261 stub_type = (info->shared | globals->pic_veneer)
3263 ? arm_stub_long_branch_any_arm_pic
3264 /* non-PIC stubs. */
3265 : arm_stub_long_branch_any_any;
3270 /* If a stub is needed, record the actual destination type. */
3271 if (stub_type != arm_stub_none)
3272 *actual_st_type = st_type;
3277 /* Build a name for an entry in the stub hash table. */
3280 elf32_arm_stub_name (const asection *input_section,
3281 const asection *sym_sec,
3282 const struct elf32_arm_link_hash_entry *hash,
3283 const Elf_Internal_Rela *rel,
3284 enum elf32_arm_stub_type stub_type)
3291 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3292 stub_name = (char *) bfd_malloc (len);
3293 if (stub_name != NULL)
3294 sprintf (stub_name, "%08x_%s+%x_%d",
3295 input_section->id & 0xffffffff,
3296 hash->root.root.root.string,
3297 (int) rel->r_addend & 0xffffffff,
3302 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3303 stub_name = (char *) bfd_malloc (len);
3304 if (stub_name != NULL)
3305 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3306 input_section->id & 0xffffffff,
3307 sym_sec->id & 0xffffffff,
3308 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3309 (int) rel->r_addend & 0xffffffff,
3316 /* Look up an entry in the stub hash. Stub entries are cached because
3317 creating the stub name takes a bit of time. */
3319 static struct elf32_arm_stub_hash_entry *
3320 elf32_arm_get_stub_entry (const asection *input_section,
3321 const asection *sym_sec,
3322 struct elf_link_hash_entry *hash,
3323 const Elf_Internal_Rela *rel,
3324 struct elf32_arm_link_hash_table *htab,
3325 enum elf32_arm_stub_type stub_type)
3327 struct elf32_arm_stub_hash_entry *stub_entry;
3328 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3329 const asection *id_sec;
3331 if ((input_section->flags & SEC_CODE) == 0)
3334 /* If this input section is part of a group of sections sharing one
3335 stub section, then use the id of the first section in the group.
3336 Stub names need to include a section id, as there may well be
3337 more than one stub used to reach say, printf, and we need to
3338 distinguish between them. */
3339 id_sec = htab->stub_group[input_section->id].link_sec;
3341 if (h != NULL && h->stub_cache != NULL
3342 && h->stub_cache->h == h
3343 && h->stub_cache->id_sec == id_sec
3344 && h->stub_cache->stub_type == stub_type)
3346 stub_entry = h->stub_cache;
3352 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3353 if (stub_name == NULL)
3356 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3357 stub_name, FALSE, FALSE);
3359 h->stub_cache = stub_entry;
3367 /* Find or create a stub section. Returns a pointer to the stub section, and
3368 the section to which the stub section will be attached (in *LINK_SEC_P).
3369 LINK_SEC_P may be NULL. */
3372 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3373 struct elf32_arm_link_hash_table *htab)
3378 link_sec = htab->stub_group[section->id].link_sec;
3379 stub_sec = htab->stub_group[section->id].stub_sec;
3380 if (stub_sec == NULL)
3382 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3383 if (stub_sec == NULL)
3389 namelen = strlen (link_sec->name);
3390 len = namelen + sizeof (STUB_SUFFIX);
3391 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3395 memcpy (s_name, link_sec->name, namelen);
3396 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3397 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3398 if (stub_sec == NULL)
3400 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3402 htab->stub_group[section->id].stub_sec = stub_sec;
3406 *link_sec_p = link_sec;
3411 /* Add a new stub entry to the stub hash. Not all fields of the new
3412 stub entry are initialised. */
3414 static struct elf32_arm_stub_hash_entry *
3415 elf32_arm_add_stub (const char *stub_name,
3417 struct elf32_arm_link_hash_table *htab)
3421 struct elf32_arm_stub_hash_entry *stub_entry;
3423 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3424 if (stub_sec == NULL)
3427 /* Enter this entry into the linker stub hash table. */
3428 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3430 if (stub_entry == NULL)
3432 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3438 stub_entry->stub_sec = stub_sec;
3439 stub_entry->stub_offset = 0;
3440 stub_entry->id_sec = link_sec;
3445 /* Store an Arm insn into an output section not processed by
3446 elf32_arm_write_section. */
3449 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3450 bfd * output_bfd, bfd_vma val, void * ptr)
3452 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3453 bfd_putl32 (val, ptr);
3455 bfd_putb32 (val, ptr);
3458 /* Store a 16-bit Thumb insn into an output section not processed by
3459 elf32_arm_write_section. */
3462 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3463 bfd * output_bfd, bfd_vma val, void * ptr)
3465 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3466 bfd_putl16 (val, ptr);
3468 bfd_putb16 (val, ptr);
3471 static bfd_reloc_status_type elf32_arm_final_link_relocate
3472 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3473 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3474 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3477 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
3481 case arm_stub_a8_veneer_b_cond:
3482 case arm_stub_a8_veneer_b:
3483 case arm_stub_a8_veneer_bl:
3486 case arm_stub_long_branch_any_any:
3487 case arm_stub_long_branch_v4t_arm_thumb:
3488 case arm_stub_long_branch_thumb_only:
3489 case arm_stub_long_branch_v4t_thumb_thumb:
3490 case arm_stub_long_branch_v4t_thumb_arm:
3491 case arm_stub_short_branch_v4t_thumb_arm:
3492 case arm_stub_long_branch_any_arm_pic:
3493 case arm_stub_long_branch_any_thumb_pic:
3494 case arm_stub_long_branch_v4t_thumb_thumb_pic:
3495 case arm_stub_long_branch_v4t_arm_thumb_pic:
3496 case arm_stub_long_branch_v4t_thumb_arm_pic:
3497 case arm_stub_long_branch_thumb_only_pic:
3498 case arm_stub_a8_veneer_blx:
3502 abort (); /* Should be unreachable. */
3507 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3511 struct elf32_arm_stub_hash_entry *stub_entry;
3512 struct elf32_arm_link_hash_table *globals;
3513 struct bfd_link_info *info;
3520 const insn_sequence *template_sequence;
3522 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3523 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3526 /* Massage our args to the form they really have. */
3527 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3528 info = (struct bfd_link_info *) in_arg;
3530 globals = elf32_arm_hash_table (info);
3531 if (globals == NULL)
3534 stub_sec = stub_entry->stub_sec;
3536 if ((globals->fix_cortex_a8 < 0)
3537 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
3538 /* We have to do less-strictly-aligned fixes last. */
3541 /* Make a note of the offset within the stubs for this entry. */
3542 stub_entry->stub_offset = stub_sec->size;
3543 loc = stub_sec->contents + stub_entry->stub_offset;
3545 stub_bfd = stub_sec->owner;
3547 /* This is the address of the stub destination. */
3548 sym_value = (stub_entry->target_value
3549 + stub_entry->target_section->output_offset
3550 + stub_entry->target_section->output_section->vma);
3552 template_sequence = stub_entry->stub_template;
3553 template_size = stub_entry->stub_template_size;
3556 for (i = 0; i < template_size; i++)
3558 switch (template_sequence[i].type)
3562 bfd_vma data = (bfd_vma) template_sequence[i].data;
3563 if (template_sequence[i].reloc_addend != 0)
3565 /* We've borrowed the reloc_addend field to mean we should
3566 insert a condition code into this (Thumb-1 branch)
3567 instruction. See THUMB16_BCOND_INSN. */
3568 BFD_ASSERT ((data & 0xff00) == 0xd000);
3569 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3571 bfd_put_16 (stub_bfd, data, loc + size);
3577 bfd_put_16 (stub_bfd,
3578 (template_sequence[i].data >> 16) & 0xffff,
3580 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
3582 if (template_sequence[i].r_type != R_ARM_NONE)
3584 stub_reloc_idx[nrelocs] = i;
3585 stub_reloc_offset[nrelocs++] = size;
3591 bfd_put_32 (stub_bfd, template_sequence[i].data,
3593 /* Handle cases where the target is encoded within the instruction. */
3595 if (template_sequence[i].r_type == R_ARM_JUMP24)
3597 stub_reloc_idx[nrelocs] = i;
3598 stub_reloc_offset[nrelocs++] = size;
3604 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3605 stub_reloc_idx[nrelocs] = i;
3606 stub_reloc_offset[nrelocs++] = size;
3616 stub_sec->size += size;
3618 /* Stub size has already been computed in arm_size_one_stub. Check consistency. */
3620 BFD_ASSERT (size == stub_entry->stub_size);
3622 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3623 if (stub_entry->st_type == STT_ARM_TFUNC)
3626 /* Assume there is at least one and at most MAXRELOCS entries to relocate in each stub. */
3628 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3630 for (i = 0; i < nrelocs; i++)
3631 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3632 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3633 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3634 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3636 Elf_Internal_Rela rel;
3637 bfd_boolean unresolved_reloc;
3638 char *error_message;
3640 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3641 ? STT_ARM_TFUNC : 0;
3642 bfd_vma points_to = sym_value + stub_entry->target_addend;
3644 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3645 rel.r_info = ELF32_R_INFO (0,
3646 template_sequence[stub_reloc_idx[i]].r_type);
3647 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3649 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3650 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3651 template should refer back to the instruction after the original branch. */
3653 points_to = sym_value;
3655 /* There may be unintended consequences if this is not true. */
3656 BFD_ASSERT (stub_entry->h == NULL);
3658 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3659 properly. We should probably use this function unconditionally,
3660 rather than only for certain relocations listed in the enclosing
3661 conditional, for the sake of consistency. */
3662 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3663 (template_sequence[stub_reloc_idx[i]].r_type),
3664 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3665 points_to, info, stub_entry->target_section, "", sym_flags,
3666 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3671 Elf_Internal_Rela rel;
3672 bfd_boolean unresolved_reloc;
3673 char *error_message;
3674 bfd_vma points_to = sym_value + stub_entry->target_addend
3675 + template_sequence[stub_reloc_idx[i]].reloc_addend;
3677 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3678 rel.r_info = ELF32_R_INFO (0,
3679 template_sequence[stub_reloc_idx[i]].r_type);
3682 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3683 (template_sequence[stub_reloc_idx[i]].r_type),
3684 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3685 points_to, info, stub_entry->target_section, "", stub_entry->st_type,
3686 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3694 /* Calculate the template, template size and instruction size for a stub.
3695 Return value is the instruction size. */
3698 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3699 const insn_sequence **stub_template,
3700 int *stub_template_size)
3702 const insn_sequence *template_sequence = NULL;
3703 int template_size = 0, i;
3706 template_sequence = stub_definitions[stub_type].template_sequence;
3708 *stub_template = template_sequence;
3710 template_size = stub_definitions[stub_type].template_size;
3711 if (stub_template_size)
3712 *stub_template_size = template_size;
3715 for (i = 0; i < template_size; i++)
3717 switch (template_sequence[i].type)
3738 /* As above, but don't actually build the stub. Just bump offset so
3739 we know stub section sizes. */
3742 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3743 void *in_arg ATTRIBUTE_UNUSED)
3745 struct elf32_arm_stub_hash_entry *stub_entry;
3746 const insn_sequence *template_sequence;
3747 int template_size, size;
3749 /* Massage our args to the form they really have. */
3750 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3752 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3753 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3755 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3758 stub_entry->stub_size = size;
3759 stub_entry->stub_template = template_sequence;
3760 stub_entry->stub_template_size = template_size;
3762 size = (size + 7) & ~7;
3763 stub_entry->stub_sec->size += size;
3768 /* External entry points for sizing and building linker stubs. */
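/* A rough sketch of how a linker front end is expected to drive the entry
   points below (the actual caller lives in ld's ARM emulation, not here;
   the local names are illustrative):

     ret = elf32_arm_setup_section_lists (output_bfd, info);
     if (ret > 0)
       {
         for each input section ISEC, in link order:
           elf32_arm_next_input_section (info, isec);
         elf32_arm_size_stubs (output_bfd, stub_bfd, info, group_size,
                               add_stub_section, layout_sections_again);
       }
*/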
3770 /* Set up various things so that we can make a list of input sections
3771 for each output section included in the link. Returns -1 on error,
3772 0 when no stubs will be needed, and 1 on success. */
3775 elf32_arm_setup_section_lists (bfd *output_bfd,
3776 struct bfd_link_info *info)
3779 unsigned int bfd_count;
3780 int top_id, top_index;
3782 asection **input_list, **list;
3784 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3788 if (! is_elf_hash_table (htab))
3791 /* Count the number of input BFDs and find the top input section id. */
3792 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3794 input_bfd = input_bfd->link_next)
3797 for (section = input_bfd->sections;
3799 section = section->next)
3801 if (top_id < section->id)
3802 top_id = section->id;
3805 htab->bfd_count = bfd_count;
3807 amt = sizeof (struct map_stub) * (top_id + 1);
3808 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3809 if (htab->stub_group == NULL)
3811 htab->top_id = top_id;
3813 /* We can't use output_bfd->section_count here to find the top output
3814 section index as some sections may have been removed, and
3815 _bfd_strip_section_from_output doesn't renumber the indices. */
3816 for (section = output_bfd->sections, top_index = 0;
3818 section = section->next)
3820 if (top_index < section->index)
3821 top_index = section->index;
3824 htab->top_index = top_index;
3825 amt = sizeof (asection *) * (top_index + 1);
3826 input_list = (asection **) bfd_malloc (amt);
3827 htab->input_list = input_list;
3828 if (input_list == NULL)
3831 /* For sections we aren't interested in, mark their entries with a
3832 value we can check later. */
3833 list = input_list + top_index;
3835 *list = bfd_abs_section_ptr;
3836 while (list-- != input_list);
3838 for (section = output_bfd->sections;
3840 section = section->next)
3842 if ((section->flags & SEC_CODE) != 0)
3843 input_list[section->index] = NULL;
3849 /* The linker repeatedly calls this function for each input section,
3850 in the order that input sections are linked into output sections.
3851 Build lists of input sections to determine groupings between which
3852 we may insert linker stubs. */
3855 elf32_arm_next_input_section (struct bfd_link_info *info,
3858 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3863 if (isec->output_section->index <= htab->top_index)
3865 asection **list = htab->input_list + isec->output_section->index;
3867 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3869 /* Steal the link_sec pointer for our list. */
3870 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3871 /* This happens to make the list in reverse order,
3872 which we reverse later. */
3873 PREV_SEC (isec) = *list;
3879 /* See whether we can group stub sections together. Grouping stub
3880 sections may result in fewer stubs. More importantly, we need to
3881 put all .init* and .fini* stubs at the end of the .init or
3882 .fini output sections respectively, because glibc splits the
3883 _init and _fini functions into multiple parts. Putting a stub in
3884 the middle of a function is not a good idea; see the illustrative example below. */
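/* As a purely illustrative example: with a 1MB stub_group_size and
   consecutive code sections A (600KB), B (300KB) and C (1.2MB), A and B are
   placed in one group whose stubs are attached after B, while C is too far
   away and starts a new group; when stubs_always_after_branch is clear,
   sections ending within a further stub_group_size bytes after B may also be
   served by B's stub section.  */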
3887 group_sections (struct elf32_arm_link_hash_table *htab,
3888 bfd_size_type stub_group_size,
3889 bfd_boolean stubs_always_after_branch)
3891 asection **list = htab->input_list;
3895 asection *tail = *list;
3898 if (tail == bfd_abs_section_ptr)
3901 /* Reverse the list: we must avoid placing stubs at the
3902 beginning of the section because the beginning of the text
3903 section may be required for an interrupt vector in bare metal code. */
3905 #define NEXT_SEC PREV_SEC
3907 while (tail != NULL)
3909 /* Pop from tail. */
3910 asection *item = tail;
3911 tail = PREV_SEC (item);
3914 NEXT_SEC (item) = head;
3918 while (head != NULL)
3922 bfd_vma stub_group_start = head->output_offset;
3923 bfd_vma end_of_next;
3926 while (NEXT_SEC (curr) != NULL)
3928 next = NEXT_SEC (curr);
3929 end_of_next = next->output_offset + next->size;
3930 if (end_of_next - stub_group_start >= stub_group_size)
3931 /* End of NEXT is too far from start, so stop. */
3933 /* Add NEXT to the group. */
3937 /* OK, the size from the start to the start of CURR is less
3938 than stub_group_size and thus can be handled by one stub
3939 section. (Or the head section is itself larger than
3940 stub_group_size, in which case we may be toast.)
3941 We should really be keeping track of the total size of
3942 stubs added here, as stubs contribute to the final output section size. */
3946 next = NEXT_SEC (head);
3947 /* Set up this stub group. */
3948 htab->stub_group[head->id].link_sec = curr;
3950 while (head != curr && (head = next) != NULL);
3952 /* But wait, there's more! Input sections up to stub_group_size
3953 bytes after the stub section can be handled by it too. */
3954 if (!stubs_always_after_branch)
3956 stub_group_start = curr->output_offset + curr->size;
3958 while (next != NULL)
3960 end_of_next = next->output_offset + next->size;
3961 if (end_of_next - stub_group_start >= stub_group_size)
3962 /* End of NEXT is too far from stubs, so stop. */
3964 /* Add NEXT to the stub group. */
3966 next = NEXT_SEC (head);
3967 htab->stub_group[head->id].link_sec = curr;
3973 while (list++ != htab->input_list + htab->top_index);
3975 free (htab->input_list);
3980 /* Comparison function for sorting/searching relocations relating to Cortex-A8 erratum. */
3984 a8_reloc_compare (const void *a, const void *b)
3986 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3987 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3989 if (ra->from < rb->from)
3991 else if (ra->from > rb->from)
3997 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3998 const char *, char **);
4000 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4001 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4002 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false otherwise. */
4006 cortex_a8_erratum_scan (bfd *input_bfd,
4007 struct bfd_link_info *info,
4008 struct a8_erratum_fix **a8_fixes_p,
4009 unsigned int *num_a8_fixes_p,
4010 unsigned int *a8_fix_table_size_p,
4011 struct a8_erratum_reloc *a8_relocs,
4012 unsigned int num_a8_relocs,
4013 unsigned prev_num_a8_fixes,
4014 bfd_boolean *stub_changed_p)
4017 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4018 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4019 unsigned int num_a8_fixes = *num_a8_fixes_p;
4020 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4025 for (section = input_bfd->sections;
4027 section = section->next)
4029 bfd_byte *contents = NULL;
4030 struct _arm_elf_section_data *sec_data;
4034 if (elf_section_type (section) != SHT_PROGBITS
4035 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4036 || (section->flags & SEC_EXCLUDE) != 0
4037 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
4038 || (section->output_section == bfd_abs_section_ptr))
4041 base_vma = section->output_section->vma + section->output_offset;
4043 if (elf_section_data (section)->this_hdr.contents != NULL)
4044 contents = elf_section_data (section)->this_hdr.contents;
4045 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4048 sec_data = elf32_arm_section_data (section);
4050 for (span = 0; span < sec_data->mapcount; span++)
4052 unsigned int span_start = sec_data->map[span].vma;
4053 unsigned int span_end = (span == sec_data->mapcount - 1)
4054 ? section->size : sec_data->map[span + 1].vma;
4056 char span_type = sec_data->map[span].type;
4057 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4059 if (span_type != 't')
4062 /* Span is entirely within a single 4KB region: skip scanning. */
4063 if (((base_vma + span_start) & ~0xfff)
4064 == ((base_vma + span_end) & ~0xfff))
4067 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4069 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4070 * The branch target is in the same 4KB region as the
4071 first half of the branch.
4072 * The instruction before the branch is a 32-bit
4073 length non-branch instruction. */
4074 for (i = span_start; i < span_end;)
4076 unsigned int insn = bfd_getl16 (&contents[i]);
4077 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4078 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4080 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4085 /* Load the rest of the insn (in manual-friendly order). */
4086 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4088 /* Encoding T4: B<c>.W. */
4089 is_b = (insn & 0xf800d000) == 0xf0009000;
4090 /* Encoding T1: BL<c>.W. */
4091 is_bl = (insn & 0xf800d000) == 0xf000d000;
4092 /* Encoding T2: BLX<c>.W. */
4093 is_blx = (insn & 0xf800d000) == 0xf000c000;
4094 /* Encoding T3: B<c>.W (not permitted in IT block). */
4095 is_bcc = (insn & 0xf800d000) == 0xf0008000
4096 && (insn & 0x07f00000) != 0x03800000;
4099 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4101 if (((base_vma + i) & 0xfff) == 0xffe
4105 && ! last_was_branch)
4107 bfd_signed_vma offset = 0;
4108 bfd_boolean force_target_arm = FALSE;
4109 bfd_boolean force_target_thumb = FALSE;
4111 enum elf32_arm_stub_type stub_type = arm_stub_none;
4112 struct a8_erratum_reloc key, *found;
4114 key.from = base_vma + i;
4115 found = (struct a8_erratum_reloc *)
4116 bsearch (&key, a8_relocs, num_a8_relocs,
4117 sizeof (struct a8_erratum_reloc),
4122 char *error_message = NULL;
4123 struct elf_link_hash_entry *entry;
4124 bfd_boolean use_plt = FALSE;
4126 /* We don't care about the error returned from this
4127 function, only if there is glue or not. */
4128 entry = find_thumb_glue (info, found->sym_name,
4132 found->non_a8_stub = TRUE;
4134 /* Keep a simpler condition, for the sake of clarity. */
4135 if (htab->splt != NULL && found->hash != NULL
4136 && found->hash->root.plt.offset != (bfd_vma) -1)
4139 if (found->r_type == R_ARM_THM_CALL)
4141 if (found->st_type != STT_ARM_TFUNC || use_plt)
4142 force_target_arm = TRUE;
4144 force_target_thumb = TRUE;
4148 /* Check if we have an offending branch instruction. */
4150 if (found && found->non_a8_stub)
4151 /* We've already made a stub for this instruction, e.g.
4152 it's a long branch or a Thumb->ARM stub. Assume that
4153 stub will suffice to work around the A8 erratum (see
4154 setting of always_after_branch above). */
4158 offset = (insn & 0x7ff) << 1;
4159 offset |= (insn & 0x3f0000) >> 4;
4160 offset |= (insn & 0x2000) ? 0x40000 : 0;
4161 offset |= (insn & 0x800) ? 0x80000 : 0;
4162 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4163 if (offset & 0x100000)
4164 offset |= ~ ((bfd_signed_vma) 0xfffff);
4165 stub_type = arm_stub_a8_veneer_b_cond;
4167 else if (is_b || is_bl || is_blx)
4169 int s = (insn & 0x4000000) != 0;
4170 int j1 = (insn & 0x2000) != 0;
4171 int j2 = (insn & 0x800) != 0;
4175 offset = (insn & 0x7ff) << 1;
4176 offset |= (insn & 0x3ff0000) >> 4;
4180 if (offset & 0x1000000)
4181 offset |= ~ ((bfd_signed_vma) 0xffffff);
4184 offset &= ~ ((bfd_signed_vma) 3);
4186 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4187 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4190 if (stub_type != arm_stub_none)
4192 bfd_vma pc_for_insn = base_vma + i + 4;
4194 /* The original instruction is a BL, but the target is
4195 an ARM instruction. If we were not making a stub,
4196 the BL would have been converted to a BLX. Use the
4197 BLX stub instead in that case. */
4198 if (htab->use_blx && force_target_arm
4199 && stub_type == arm_stub_a8_veneer_bl)
4201 stub_type = arm_stub_a8_veneer_blx;
4205 /* Conversely, if the original instruction was
4206 BLX but the target is Thumb mode, use the BL stub instead. */
4208 else if (force_target_thumb
4209 && stub_type == arm_stub_a8_veneer_blx)
4211 stub_type = arm_stub_a8_veneer_bl;
4217 pc_for_insn &= ~ ((bfd_vma) 3);
4219 /* If we found a relocation, use the proper destination,
4220 not the offset in the (unrelocated) instruction.
4221 Note this is always done if we switched the stub type above. */
4225 (bfd_signed_vma) (found->destination - pc_for_insn);
4227 target = pc_for_insn + offset;
4229 /* The BLX stub is ARM-mode code. Adjust the offset to
4230 take the different PC value (+8 instead of +4) into account. */
4232 if (stub_type == arm_stub_a8_veneer_blx)
4235 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4237 char *stub_name = NULL;
4239 if (num_a8_fixes == a8_fix_table_size)
4241 a8_fix_table_size *= 2;
4242 a8_fixes = (struct a8_erratum_fix *)
4243 bfd_realloc (a8_fixes,
4244 sizeof (struct a8_erratum_fix)
4245 * a8_fix_table_size);
4248 if (num_a8_fixes < prev_num_a8_fixes)
4250 /* If we're doing a subsequent scan,
4251 check if we've found the same fix as
4252 before, and try to reuse the stub name. */
4254 stub_name = a8_fixes[num_a8_fixes].stub_name;
4255 if ((a8_fixes[num_a8_fixes].section != section)
4256 || (a8_fixes[num_a8_fixes].offset != i))
4260 *stub_changed_p = TRUE;
4266 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4267 if (stub_name != NULL)
4268 sprintf (stub_name, "%x:%x", section->id, i);
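/* The name is two hex numbers of at most eight digits each plus a
   separating colon and the terminating NUL, which is what the
   8 + 1 + 8 + 1 allocation above provides for. */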
4271 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4272 a8_fixes[num_a8_fixes].section = section;
4273 a8_fixes[num_a8_fixes].offset = i;
4274 a8_fixes[num_a8_fixes].addend = offset;
4275 a8_fixes[num_a8_fixes].orig_insn = insn;
4276 a8_fixes[num_a8_fixes].stub_name = stub_name;
4277 a8_fixes[num_a8_fixes].stub_type = stub_type;
4278 a8_fixes[num_a8_fixes].st_type =
4279 is_blx ? STT_FUNC : STT_ARM_TFUNC;
4286 i += insn_32bit ? 4 : 2;
4287 last_was_32bit = insn_32bit;
4288 last_was_branch = is_32bit_branch;
4292 if (elf_section_data (section)->this_hdr.contents == NULL)
4296 *a8_fixes_p = a8_fixes;
4297 *num_a8_fixes_p = num_a8_fixes;
4298 *a8_fix_table_size_p = a8_fix_table_size;
4303 /* Determine and set the size of the stub section for a final link.
4305 The basic idea here is to examine all the relocations looking for
4306 PC-relative calls to a target that is unreachable with a "bl" instruction. */
4310 elf32_arm_size_stubs (bfd *output_bfd,
4312 struct bfd_link_info *info,
4313 bfd_signed_vma group_size,
4314 asection * (*add_stub_section) (const char *, asection *),
4315 void (*layout_sections_again) (void))
4317 bfd_size_type stub_group_size;
4318 bfd_boolean stubs_always_after_branch;
4319 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4320 struct a8_erratum_fix *a8_fixes = NULL;
4321 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4322 struct a8_erratum_reloc *a8_relocs = NULL;
4323 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4328 if (htab->fix_cortex_a8)
4330 a8_fixes = (struct a8_erratum_fix *)
4331 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4332 a8_relocs = (struct a8_erratum_reloc *)
4333 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4336 /* Propagate mach to stub bfd, because it may not have been
4337 finalized when we created stub_bfd. */
4338 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4339 bfd_get_mach (output_bfd));
4341 /* Stash our params away. */
4342 htab->stub_bfd = stub_bfd;
4343 htab->add_stub_section = add_stub_section;
4344 htab->layout_sections_again = layout_sections_again;
4345 stubs_always_after_branch = group_size < 0;
4347 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4348 as the first half of a 32-bit branch straddling two 4K pages. This is a
4349 crude way of enforcing that. */
4350 if (htab->fix_cortex_a8)
4351 stubs_always_after_branch = 1;
4354 stub_group_size = -group_size;
4356 stub_group_size = group_size;
4358 if (stub_group_size == 1)
4360 /* Default values. */
4361 /* The Thumb branch range of +-4MB has to be used as the default
4362 maximum size (a given section can contain both ARM and Thumb
4363 code, so the worst case has to be taken into account).
4365 This value is 24K less than that, which allows for 2025
4366 12-byte stubs. If we exceed that, then we will fail to link.
4367 The user will have to relink with an explicit group size option. */
4369 stub_group_size = 4170000;
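/* 4MB is 4194304 bytes; 4194304 - 4170000 = 24304 bytes of headroom,
   enough for 2025 twelve-byte stubs (2025 * 12 = 24300). */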
4372 group_sections (htab, stub_group_size, stubs_always_after_branch);
4374 /* If we're applying the cortex A8 fix, we need to determine the
4375 program header size now, because we cannot change it later --
4376 that could alter section placements. Notice the A8 erratum fix
4377 ends up requiring the section addresses to remain unchanged
4378 modulo the page size. That's something we cannot represent
4379 inside BFD, and we don't want to force the section alignment to
4380 be the page size. */
4381 if (htab->fix_cortex_a8)
4382 (*htab->layout_sections_again) ();
4387 unsigned int bfd_indx;
4389 bfd_boolean stub_changed = FALSE;
4390 unsigned prev_num_a8_fixes = num_a8_fixes;
4393 for (input_bfd = info->input_bfds, bfd_indx = 0;
4395 input_bfd = input_bfd->link_next, bfd_indx++)
4397 Elf_Internal_Shdr *symtab_hdr;
4399 Elf_Internal_Sym *local_syms = NULL;
4403 /* We'll need the symbol table in a second. */
4404 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4405 if (symtab_hdr->sh_info == 0)
4408 /* Walk over each section attached to the input bfd. */
4409 for (section = input_bfd->sections;
4411 section = section->next)
4413 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4415 /* If there aren't any relocs, then there's nothing more to do. */
4417 if ((section->flags & SEC_RELOC) == 0
4418 || section->reloc_count == 0
4419 || (section->flags & SEC_CODE) == 0)
4422 /* If this section is a link-once section that will be
4423 discarded, then don't create any stubs. */
4424 if (section->output_section == NULL
4425 || section->output_section->owner != output_bfd)
4428 /* Get the relocs. */
4430 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4431 NULL, info->keep_memory);
4432 if (internal_relocs == NULL)
4433 goto error_ret_free_local;
4435 /* Now examine each relocation. */
4436 irela = internal_relocs;
4437 irelaend = irela + section->reloc_count;
4438 for (; irela < irelaend; irela++)
4440 unsigned int r_type, r_indx;
4441 enum elf32_arm_stub_type stub_type;
4442 struct elf32_arm_stub_hash_entry *stub_entry;
4445 bfd_vma destination;
4446 struct elf32_arm_link_hash_entry *hash;
4447 const char *sym_name;
4449 const asection *id_sec;
4451 bfd_boolean created_stub = FALSE;
4453 r_type = ELF32_R_TYPE (irela->r_info);
4454 r_indx = ELF32_R_SYM (irela->r_info);
4456 if (r_type >= (unsigned int) R_ARM_max)
4458 bfd_set_error (bfd_error_bad_value);
4459 error_ret_free_internal:
4460 if (elf_section_data (section)->relocs == NULL)
4461 free (internal_relocs);
4462 goto error_ret_free_local;
4465 /* Only look for stubs on branch instructions. */
4466 if ((r_type != (unsigned int) R_ARM_CALL)
4467 && (r_type != (unsigned int) R_ARM_THM_CALL)
4468 && (r_type != (unsigned int) R_ARM_JUMP24)
4469 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4470 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4471 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4472 && (r_type != (unsigned int) R_ARM_PLT32))
4475 /* Now determine the call target, its name, value, and section. */
4482 if (r_indx < symtab_hdr->sh_info)
4484 /* It's a local symbol. */
4485 Elf_Internal_Sym *sym;
4487 if (local_syms == NULL)
4490 = (Elf_Internal_Sym *) symtab_hdr->contents;
4491 if (local_syms == NULL)
4493 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4494 symtab_hdr->sh_info, 0,
4496 if (local_syms == NULL)
4497 goto error_ret_free_internal;
4500 sym = local_syms + r_indx;
4501 if (sym->st_shndx == SHN_UNDEF)
4502 sym_sec = bfd_und_section_ptr;
4503 else if (sym->st_shndx == SHN_ABS)
4504 sym_sec = bfd_abs_section_ptr;
4505 else if (sym->st_shndx == SHN_COMMON)
4506 sym_sec = bfd_com_section_ptr;
4509 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
4512 /* This is an undefined symbol. It can never be resolved. */
4516 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4517 sym_value = sym->st_value;
4518 destination = (sym_value + irela->r_addend
4519 + sym_sec->output_offset
4520 + sym_sec->output_section->vma);
4521 st_type = ELF_ST_TYPE (sym->st_info);
4523 = bfd_elf_string_from_elf_section (input_bfd,
4524 symtab_hdr->sh_link,
4529 /* It's an external symbol. */
4532 e_indx = r_indx - symtab_hdr->sh_info;
4533 hash = ((struct elf32_arm_link_hash_entry *)
4534 elf_sym_hashes (input_bfd)[e_indx]);
4536 while (hash->root.root.type == bfd_link_hash_indirect
4537 || hash->root.root.type == bfd_link_hash_warning)
4538 hash = ((struct elf32_arm_link_hash_entry *)
4539 hash->root.root.u.i.link);
4541 if (hash->root.root.type == bfd_link_hash_defined
4542 || hash->root.root.type == bfd_link_hash_defweak)
4544 sym_sec = hash->root.root.u.def.section;
4545 sym_value = hash->root.root.u.def.value;
4547 struct elf32_arm_link_hash_table *globals =
4548 elf32_arm_hash_table (info);
4550 /* For a destination in a shared library,
4551 use the PLT stub as target address to
4552 decide whether a branch stub is needed. */
4555 && globals->splt != NULL
4557 && hash->root.plt.offset != (bfd_vma) -1)
4559 sym_sec = globals->splt;
4560 sym_value = hash->root.plt.offset;
4561 if (sym_sec->output_section != NULL)
4562 destination = (sym_value
4563 + sym_sec->output_offset
4564 + sym_sec->output_section->vma);
4566 else if (sym_sec->output_section != NULL)
4567 destination = (sym_value + irela->r_addend
4568 + sym_sec->output_offset
4569 + sym_sec->output_section->vma);
4571 else if ((hash->root.root.type == bfd_link_hash_undefined)
4572 || (hash->root.root.type == bfd_link_hash_undefweak))
4574 /* For a shared library, use the PLT stub as
4575 target address to decide whether a long
4576 branch stub is needed.
4577 For absolute code, they cannot be handled. */
4578 struct elf32_arm_link_hash_table *globals =
4579 elf32_arm_hash_table (info);
4582 && globals->splt != NULL
4584 && hash->root.plt.offset != (bfd_vma) -1)
4586 sym_sec = globals->splt;
4587 sym_value = hash->root.plt.offset;
4588 if (sym_sec->output_section != NULL)
4589 destination = (sym_value
4590 + sym_sec->output_offset
4591 + sym_sec->output_section->vma);
4598 bfd_set_error (bfd_error_bad_value);
4599 goto error_ret_free_internal;
4601 st_type = ELF_ST_TYPE (hash->root.type);
4602 sym_name = hash->root.root.root.string;
4607 /* Determine what (if any) linker stub is needed. */
4608 stub_type = arm_type_of_stub (info, section, irela,
4610 destination, sym_sec,
4611 input_bfd, sym_name);
4612 if (stub_type == arm_stub_none)
4615 /* Support for grouping stub sections. */
4616 id_sec = htab->stub_group[section->id].link_sec;
4618 /* Get the name of this stub. */
4619 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4622 goto error_ret_free_internal;
4624 /* We've either created a stub for this reloc already,
4625 or we are about to. */
4626 created_stub = TRUE;
4628 stub_entry = arm_stub_hash_lookup
4629 (&htab->stub_hash_table, stub_name,
4631 if (stub_entry != NULL)
4633 /* The proper stub has already been created. */
4635 stub_entry->target_value = sym_value;
4639 stub_entry = elf32_arm_add_stub (stub_name, section,
4641 if (stub_entry == NULL)
4644 goto error_ret_free_internal;
4647 stub_entry->target_value = sym_value;
4648 stub_entry->target_section = sym_sec;
4649 stub_entry->stub_type = stub_type;
4650 stub_entry->h = hash;
4651 stub_entry->st_type = st_type;
4653 if (sym_name == NULL)
4654 sym_name = "unnamed";
4655 stub_entry->output_name = (char *)
4656 bfd_alloc (htab->stub_bfd,
4657 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4658 + strlen (sym_name));
4659 if (stub_entry->output_name == NULL)
4662 goto error_ret_free_internal;
4665 /* For historical reasons, use the existing names for
4666 ARM-to-Thumb and Thumb-to-ARM stubs. */
4667 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4668 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4669 && st_type != STT_ARM_TFUNC)
4670 sprintf (stub_entry->output_name,
4671 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4672 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4673 || (r_type == (unsigned int) R_ARM_JUMP24))
4674 && st_type == STT_ARM_TFUNC)
4675 sprintf (stub_entry->output_name,
4676 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4678 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4681 stub_changed = TRUE;
4685 /* Look for relocations which might trigger the Cortex-A8 erratum. */
4687 if (htab->fix_cortex_a8
4688 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4689 || r_type == (unsigned int) R_ARM_THM_JUMP19
4690 || r_type == (unsigned int) R_ARM_THM_CALL
4691 || r_type == (unsigned int) R_ARM_THM_XPC22))
4693 bfd_vma from = section->output_section->vma
4694 + section->output_offset
4697 if ((from & 0xfff) == 0xffe)
4699 /* Found a candidate. Note we haven't checked the
4700 destination is within 4K here: if we do so (and
4701 don't create an entry in a8_relocs) we can't tell
4702 that a branch should have been relocated when scanning later. */
4704 if (num_a8_relocs == a8_reloc_table_size)
4706 a8_reloc_table_size *= 2;
4707 a8_relocs = (struct a8_erratum_reloc *)
4708 bfd_realloc (a8_relocs,
4709 sizeof (struct a8_erratum_reloc)
4710 * a8_reloc_table_size);
4713 a8_relocs[num_a8_relocs].from = from;
4714 a8_relocs[num_a8_relocs].destination = destination;
4715 a8_relocs[num_a8_relocs].r_type = r_type;
4716 a8_relocs[num_a8_relocs].st_type = st_type;
4717 a8_relocs[num_a8_relocs].sym_name = sym_name;
4718 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4719 a8_relocs[num_a8_relocs].hash = hash;
4726 /* We're done with the internal relocs, free them. */
4727 if (elf_section_data (section)->relocs == NULL)
4728 free (internal_relocs);
4731 if (htab->fix_cortex_a8)
4733 /* Sort relocs which might apply to Cortex-A8 erratum. */
4734 qsort (a8_relocs, num_a8_relocs,
4735 sizeof (struct a8_erratum_reloc),
4738 /* Scan for branches which might trigger Cortex-A8 erratum. */
4739 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4740 &num_a8_fixes, &a8_fix_table_size,
4741 a8_relocs, num_a8_relocs,
4742 prev_num_a8_fixes, &stub_changed)
4744 goto error_ret_free_local;
4748 if (prev_num_a8_fixes != num_a8_fixes)
4749 stub_changed = TRUE;
4754 /* OK, we've added some stubs. Find out the new size of the stub sections. */
4756 for (stub_sec = htab->stub_bfd->sections;
4758 stub_sec = stub_sec->next)
4760 /* Ignore non-stub sections. */
4761 if (!strstr (stub_sec->name, STUB_SUFFIX))
4767 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4769 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4770 if (htab->fix_cortex_a8)
4771 for (i = 0; i < num_a8_fixes; i++)
4773 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4774 a8_fixes[i].section, htab);
4776 if (stub_sec == NULL)
4777 goto error_ret_free_local;
4780 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4785 /* Ask the linker to do its stuff. */
4786 (*htab->layout_sections_again) ();
4789 /* Add stubs for Cortex-A8 erratum fixes now. */
4790 if (htab->fix_cortex_a8)
4792 for (i = 0; i < num_a8_fixes; i++)
4794 struct elf32_arm_stub_hash_entry *stub_entry;
4795 char *stub_name = a8_fixes[i].stub_name;
4796 asection *section = a8_fixes[i].section;
4797 unsigned int section_id = a8_fixes[i].section->id;
4798 asection *link_sec = htab->stub_group[section_id].link_sec;
4799 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4800 const insn_sequence *template_sequence;
4801 int template_size, size = 0;
4803 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4805 if (stub_entry == NULL)
4807 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4813 stub_entry->stub_sec = stub_sec;
4814 stub_entry->stub_offset = 0;
4815 stub_entry->id_sec = link_sec;
4816 stub_entry->stub_type = a8_fixes[i].stub_type;
4817 stub_entry->target_section = a8_fixes[i].section;
4818 stub_entry->target_value = a8_fixes[i].offset;
4819 stub_entry->target_addend = a8_fixes[i].addend;
4820 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4821 stub_entry->st_type = a8_fixes[i].st_type;
4823 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4827 stub_entry->stub_size = size;
4828 stub_entry->stub_template = template_sequence;
4829 stub_entry->stub_template_size = template_size;
4832 /* Stash the Cortex-A8 erratum fix array for use later in
4833 elf32_arm_write_section(). */
4834 htab->a8_erratum_fixes = a8_fixes;
4835 htab->num_a8_erratum_fixes = num_a8_fixes;
4839 htab->a8_erratum_fixes = NULL;
4840 htab->num_a8_erratum_fixes = 0;
4844 error_ret_free_local:
4848 /* Build all the stubs associated with the current output file. The
4849 stubs are kept in a hash table attached to the main linker hash
4850 table. We also set up the .plt entries for statically linked PIC
4851 functions here. This function is called via arm_elf_finish in the linker. */
4855 elf32_arm_build_stubs (struct bfd_link_info *info)
4858 struct bfd_hash_table *table;
4859 struct elf32_arm_link_hash_table *htab;
4861 htab = elf32_arm_hash_table (info);
4865 for (stub_sec = htab->stub_bfd->sections;
4867 stub_sec = stub_sec->next)
4871 /* Ignore non-stub sections. */
4872 if (!strstr (stub_sec->name, STUB_SUFFIX))
4875 /* Allocate memory to hold the linker stubs. */
4876 size = stub_sec->size;
4877 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4878 if (stub_sec->contents == NULL && size != 0)
4883 /* Build the stubs as directed by the stub hash table. */
4884 table = &htab->stub_hash_table;
4885 bfd_hash_traverse (table, arm_build_one_stub, info);
4886 if (htab->fix_cortex_a8)
4888 /* Place the cortex a8 stubs last. */
4889 htab->fix_cortex_a8 = -1;
4890 bfd_hash_traverse (table, arm_build_one_stub, info);
4896 /* Locate the Thumb encoded calling stub for NAME. */
4898 static struct elf_link_hash_entry *
4899 find_thumb_glue (struct bfd_link_info *link_info,
4901 char **error_message)
4904 struct elf_link_hash_entry *hash;
4905 struct elf32_arm_link_hash_table *hash_table;
4907 /* We need a pointer to the armelf specific hash table. */
4908 hash_table = elf32_arm_hash_table (link_info);
4909 if (hash_table == NULL)
4912 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4913 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4915 BFD_ASSERT (tmp_name);
4917 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4919 hash = elf_link_hash_lookup
4920 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4923 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4924 tmp_name, name) == -1)
4925 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4932 /* Locate the ARM encoded calling stub for NAME. */
4934 static struct elf_link_hash_entry *
4935 find_arm_glue (struct bfd_link_info *link_info,
4937 char **error_message)
4940 struct elf_link_hash_entry *myh;
4941 struct elf32_arm_link_hash_table *hash_table;
4943 /* We need a pointer to the elfarm specific hash table. */
4944 hash_table = elf32_arm_hash_table (link_info);
4945 if (hash_table == NULL)
4948 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4949 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4951 BFD_ASSERT (tmp_name);
4953 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4955 myh = elf_link_hash_lookup
4956 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4959 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4960 tmp_name, name) == -1)
4961 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4968 /* ARM->Thumb glue (static images):
4972 ldr r12, __func_addr
4975 .word func @ behave as if you saw an ARM_32 reloc.
4982 .word func @ behave as if you saw an ARM_32 reloc.
4984 (relocatable images)
4987 ldr r12, __func_offset
4993 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4994 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4995 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4996 static const insn32 a2t3_func_addr_insn = 0x00000001;
4998 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4999 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5000 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5002 #define ARM2THUMB_PIC_GLUE_SIZE 16
5003 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5004 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5005 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
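/* For reference, the glue opcodes above decode roughly as follows.
   Static:    0xe59fc000  ldr ip, [pc]       (loads __func_addr)
              0xe12fff1c  bx  ip
   v5 static: 0xe51ff004  ldr pc, [pc, #-4]  (loads __func_addr
                                              straight into the PC)
   PIC:       0xe59fc004  ldr ip, [pc, #4]   (loads __func_offset)
              0xe08cc00f  add ip, ip, pc
              0xe12fff1c  bx  ip
   The 0x00000001 "address" constants are OR-ed into the address words
   so that bit 0 marks a Thumb destination. */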
5007 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
5011 __func_from_thumb: __func_from_thumb:
5013 nop ldr r6, __func_addr
5023 #define THUMB2ARM_GLUE_SIZE 8
5024 static const insn16 t2a1_bx_pc_insn = 0x4778;
5025 static const insn16 t2a2_noop_insn = 0x46c0;
5026 static const insn32 t2a3_b_insn = 0xea000000;
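/* The Thumb->ARM glue is: 0x4778 bx pc, which clears the Thumb bit
   and lands on the word-aligned ARM branch 4 bytes further on (the
   Thumb PC reads as the address of the bx plus 4); 0x46c0 nop
   (mov r8, r8) as padding; and 0xea000000, an ARM b whose 24-bit
   offset is filled in by elf32_thumb_to_arm_stub below. */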
5028 #define VFP11_ERRATUM_VENEER_SIZE 8
5030 #define ARM_BX_VENEER_SIZE 12
5031 static const insn32 armbx1_tst_insn = 0xe3100001;
5032 static const insn32 armbx2_moveq_insn = 0x01a0f000;
5033 static const insn32 armbx3_bx_insn = 0xe12fff10;
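/* With the register field patched in per veneer, these decode as:
   0xe3100001  tst   rN, #1
   0x01a0f000  moveq pc, rN
   0xe12fff10  bx    rN
   so an ARM-mode destination (bit 0 clear) is reached by the plain
   moveq and the bx is only executed for genuine interworking calls. */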
5035 #ifndef ELFARM_NABI_C_INCLUDED
5037 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5040 bfd_byte * contents;
5044 /* Do not include empty glue sections in the output. */
5047 s = bfd_get_section_by_name (abfd, name);
5049 s->flags |= SEC_EXCLUDE;
5054 BFD_ASSERT (abfd != NULL);
5056 s = bfd_get_section_by_name (abfd, name);
5057 BFD_ASSERT (s != NULL);
5059 contents = (bfd_byte *) bfd_alloc (abfd, size);
5061 BFD_ASSERT (s->size == size);
5062 s->contents = contents;
5066 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5068 struct elf32_arm_link_hash_table * globals;
5070 globals = elf32_arm_hash_table (info);
5071 BFD_ASSERT (globals != NULL);
5073 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5074 globals->arm_glue_size,
5075 ARM2THUMB_GLUE_SECTION_NAME);
5077 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5078 globals->thumb_glue_size,
5079 THUMB2ARM_GLUE_SECTION_NAME);
5081 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5082 globals->vfp11_erratum_glue_size,
5083 VFP11_ERRATUM_VENEER_SECTION_NAME);
5085 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5086 globals->bx_glue_size,
5087 ARM_BX_GLUE_SECTION_NAME);
5092 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5093 Returns the symbol identifying the stub. */
5095 static struct elf_link_hash_entry *
5096 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5097 struct elf_link_hash_entry * h)
5099 const char * name = h->root.root.string;
5102 struct elf_link_hash_entry * myh;
5103 struct bfd_link_hash_entry * bh;
5104 struct elf32_arm_link_hash_table * globals;
5108 globals = elf32_arm_hash_table (link_info);
5109 BFD_ASSERT (globals != NULL);
5110 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5112 s = bfd_get_section_by_name
5113 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5115 BFD_ASSERT (s != NULL);
5117 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5118 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5120 BFD_ASSERT (tmp_name);
5122 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5124 myh = elf_link_hash_lookup
5125 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5129 /* We've already seen this guy. */
5134 /* The only trick here is using hash_table->arm_glue_size as the value.
5135 Even though the section isn't allocated yet, this is where we will be
5136 putting it. The +1 on the value marks that the stub has not been
5137 output yet - not that it is a Thumb function. */
5139 val = globals->arm_glue_size + 1;
5140 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5141 tmp_name, BSF_GLOBAL, s, val,
5142 NULL, TRUE, FALSE, &bh);
5144 myh = (struct elf_link_hash_entry *) bh;
5145 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5146 myh->forced_local = 1;
5150 if (link_info->shared || globals->root.is_relocatable_executable
5151 || globals->pic_veneer)
5152 size = ARM2THUMB_PIC_GLUE_SIZE;
5153 else if (globals->use_blx)
5154 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5156 size = ARM2THUMB_STATIC_GLUE_SIZE;
5159 globals->arm_glue_size += size;
5164 /* Allocate space for ARMv4 BX veneers. */
5167 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5170 struct elf32_arm_link_hash_table *globals;
5172 struct elf_link_hash_entry *myh;
5173 struct bfd_link_hash_entry *bh;
5176 /* BX PC does not need a veneer. */
5180 globals = elf32_arm_hash_table (link_info);
5181 BFD_ASSERT (globals != NULL);
5182 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5184 /* Check if this veneer has already been allocated. */
5185 if (globals->bx_glue_offset[reg])
5188 s = bfd_get_section_by_name
5189 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5191 BFD_ASSERT (s != NULL);
5193 /* Add symbol for veneer. */
5195 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5197 BFD_ASSERT (tmp_name);
5199 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5201 myh = elf_link_hash_lookup
5202 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5204 BFD_ASSERT (myh == NULL);
5207 val = globals->bx_glue_size;
5208 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5209 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5210 NULL, TRUE, FALSE, &bh);
5212 myh = (struct elf_link_hash_entry *) bh;
5213 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5214 myh->forced_local = 1;
5216 s->size += ARM_BX_VENEER_SIZE;
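/* Veneer offsets are multiples of 4, so the low two bits of
   bx_glue_offset can be used as flags: setting bit 1 below records
   that space for this veneer has been allocated (and keeps the entry
   non-zero even at offset 0); bit 0 appears to be reserved for
   marking the veneer as written out later. */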
5217 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5218 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5222 /* Add an entry to the code/data map for section SEC. */
5225 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5227 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5228 unsigned int newidx;
5230 if (sec_data->map == NULL)
5232 sec_data->map = (elf32_arm_section_map *)
5233 bfd_malloc (sizeof (elf32_arm_section_map));
5234 sec_data->mapcount = 0;
5235 sec_data->mapsize = 1;
5238 newidx = sec_data->mapcount++;
5240 if (sec_data->mapcount > sec_data->mapsize)
5242 sec_data->mapsize *= 2;
5243 sec_data->map = (elf32_arm_section_map *)
5244 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5245 * sizeof (elf32_arm_section_map));
5250 sec_data->map[newidx].vma = vma;
5251 sec_data->map[newidx].type = type;
5256 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5257 veneers are handled for now. */
5260 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5261 elf32_vfp11_erratum_list *branch,
5263 asection *branch_sec,
5264 unsigned int offset)
5267 struct elf32_arm_link_hash_table *hash_table;
5269 struct elf_link_hash_entry *myh;
5270 struct bfd_link_hash_entry *bh;
5272 struct _arm_elf_section_data *sec_data;
5273 elf32_vfp11_erratum_list *newerr;
5275 hash_table = elf32_arm_hash_table (link_info);
5276 BFD_ASSERT (hash_table != NULL);
5277 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5279 s = bfd_get_section_by_name
5280 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5282 sec_data = elf32_arm_section_data (s);
5284 BFD_ASSERT (s != NULL);
5286 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5287 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5289 BFD_ASSERT (tmp_name);
5291 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5292 hash_table->num_vfp11_fixes);
5294 myh = elf_link_hash_lookup
5295 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5297 BFD_ASSERT (myh == NULL);
5300 val = hash_table->vfp11_erratum_glue_size;
5301 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5302 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5303 NULL, TRUE, FALSE, &bh);
5305 myh = (struct elf_link_hash_entry *) bh;
5306 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5307 myh->forced_local = 1;
5309 /* Link veneer back to calling location. */
5310 sec_data->erratumcount += 1;
5311 newerr = (elf32_vfp11_erratum_list *)
5312 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5314 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5316 newerr->u.v.branch = branch;
5317 newerr->u.v.id = hash_table->num_vfp11_fixes;
5318 branch->u.b.veneer = newerr;
5320 newerr->next = sec_data->erratumlist;
5321 sec_data->erratumlist = newerr;
5323 /* A symbol for the return from the veneer. */
5324 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5325 hash_table->num_vfp11_fixes);
5327 myh = elf_link_hash_lookup
5328 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5335 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5336 branch_sec, val, NULL, TRUE, FALSE, &bh);
5338 myh = (struct elf_link_hash_entry *) bh;
5339 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5340 myh->forced_local = 1;
5344 /* Generate a mapping symbol for the veneer section, and explicitly add an
5345 entry for that symbol to the code/data map for the section. */
5346 if (hash_table->vfp11_erratum_glue_size == 0)
5349 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5350 ever requires this erratum fix. */
5351 _bfd_generic_link_add_one_symbol (link_info,
5352 hash_table->bfd_of_glue_owner, "$a",
5353 BSF_LOCAL, s, 0, NULL,
5356 myh = (struct elf_link_hash_entry *) bh;
5357 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5358 myh->forced_local = 1;
5360 /* The elf32_arm_init_maps function only cares about symbols from input
5361 BFDs. We must make a note of this generated mapping symbol
5362 ourselves so that code byteswapping works properly in
5363 elf32_arm_write_section. */
5364 elf32_arm_section_map_add (s, 'a', 0);
5367 s->size += VFP11_ERRATUM_VENEER_SIZE;
5368 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5369 hash_table->num_vfp11_fixes++;
5371 /* The offset of the veneer. */
5375 #define ARM_GLUE_SECTION_FLAGS \
5376 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5377 | SEC_READONLY | SEC_LINKER_CREATED)
5379 /* Create a fake section for use by the ARM backend of the linker. */
5382 arm_make_glue_section (bfd * abfd, const char * name)
5386 sec = bfd_get_section_by_name (abfd, name);
5391 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5394 || !bfd_set_section_alignment (abfd, sec, 2))
5397 /* Set the gc mark to prevent the section from being removed by garbage
5398 collection, despite the fact that no relocs refer to this section. */
5404 /* Add the glue sections to ABFD. This function is called from the
5405 linker scripts in ld/emultempl/{armelf}.em. */
5408 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5409 struct bfd_link_info *info)
5411 /* If we are only performing a partial
5412 link do not bother adding the glue. */
5413 if (info->relocatable)
5416 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5417 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5418 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5419 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5422 /* Select a BFD to be used to hold the sections used by the glue code.
5423 This function is called from the linker scripts in ld/emultempl/{armelf}.em. */
5427 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5429 struct elf32_arm_link_hash_table *globals;
5431 /* If we are only performing a partial link
5432 do not bother getting a bfd to hold the glue. */
5433 if (info->relocatable)
5436 /* Make sure we don't attach the glue sections to a dynamic object. */
5437 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5439 globals = elf32_arm_hash_table (info);
5440 BFD_ASSERT (globals != NULL);
5442 if (globals->bfd_of_glue_owner != NULL)
5445 /* Save the bfd for later use. */
5446 globals->bfd_of_glue_owner = abfd;
5452 check_use_blx (struct elf32_arm_link_hash_table *globals)
5454 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5456 globals->use_blx = 1;
5460 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5461 struct bfd_link_info *link_info)
5463 Elf_Internal_Shdr *symtab_hdr;
5464 Elf_Internal_Rela *internal_relocs = NULL;
5465 Elf_Internal_Rela *irel, *irelend;
5466 bfd_byte *contents = NULL;
5469 struct elf32_arm_link_hash_table *globals;
5471 /* If we are only performing a partial link do not bother
5472 to construct any glue. */
5473 if (link_info->relocatable)
5476 /* Here we have a bfd that is to be included in the link. We have a
5477 hook to do reloc rummaging, before section sizes are nailed down. */
5478 globals = elf32_arm_hash_table (link_info);
5479 BFD_ASSERT (globals != NULL);
5481 check_use_blx (globals);
5483 if (globals->byteswap_code && !bfd_big_endian (abfd))
5485 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5490 /* PR 5398: If we have not decided to include any loadable sections in
5491 the output then we will not have a glue owner bfd. This is OK, it
5492 just means that there is nothing else for us to do here. */
5493 if (globals->bfd_of_glue_owner == NULL)
5496 /* Rummage around all the relocs and map the glue vectors. */
5497 sec = abfd->sections;
5502 for (; sec != NULL; sec = sec->next)
5504 if (sec->reloc_count == 0)
5507 if ((sec->flags & SEC_EXCLUDE) != 0)
5510 symtab_hdr = & elf_symtab_hdr (abfd);
5512 /* Load the relocs. */
5514 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5516 if (internal_relocs == NULL)
5519 irelend = internal_relocs + sec->reloc_count;
5520 for (irel = internal_relocs; irel < irelend; irel++)
5523 unsigned long r_index;
5525 struct elf_link_hash_entry *h;
5527 r_type = ELF32_R_TYPE (irel->r_info);
5528 r_index = ELF32_R_SYM (irel->r_info);
5530 /* These are the only relocation types we care about. */
5531 if ( r_type != R_ARM_PC24
5532 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5535 /* Get the section contents if we haven't done so already. */
5536 if (contents == NULL)
5538 /* Get cached copy if it exists. */
5539 if (elf_section_data (sec)->this_hdr.contents != NULL)
5540 contents = elf_section_data (sec)->this_hdr.contents;
5543 /* Go get them off disk. */
5544 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5549 if (r_type == R_ARM_V4BX)
5553 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5554 record_arm_bx_glue (link_info, reg);
5558 /* If the relocation is not against a symbol it cannot concern us. */
5561 /* We don't care about local symbols. */
5562 if (r_index < symtab_hdr->sh_info)
5565 /* This is an external symbol. */
5566 r_index -= symtab_hdr->sh_info;
5567 h = (struct elf_link_hash_entry *)
5568 elf_sym_hashes (abfd)[r_index];
5570 /* If the relocation is against a static symbol it must be within
5571 the current section and so cannot be a cross ARM/Thumb relocation. */
5575 /* If the call will go through a PLT entry then we do not need glue. */
5577 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5583 /* This one is a call from arm code. We need to look up
5584 the target of the call. If it is a thumb target, we insert glue. */
5586 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5587 record_arm_to_thumb_glue (link_info, h);
5595 if (contents != NULL
5596 && elf_section_data (sec)->this_hdr.contents != contents)
5600 if (internal_relocs != NULL
5601 && elf_section_data (sec)->relocs != internal_relocs)
5602 free (internal_relocs);
5603 internal_relocs = NULL;
5609 if (contents != NULL
5610 && elf_section_data (sec)->this_hdr.contents != contents)
5612 if (internal_relocs != NULL
5613 && elf_section_data (sec)->relocs != internal_relocs)
5614 free (internal_relocs);
5621 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5624 bfd_elf32_arm_init_maps (bfd *abfd)
5626 Elf_Internal_Sym *isymbuf;
5627 Elf_Internal_Shdr *hdr;
5628 unsigned int i, localsyms;
5630 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5631 if (! is_arm_elf (abfd))
5634 if ((abfd->flags & DYNAMIC) != 0)
5637 hdr = & elf_symtab_hdr (abfd);
5638 localsyms = hdr->sh_info;
5640 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5641 should contain the number of local symbols, which should come before any
5642 global symbols. Mapping symbols are always local. */
5643 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5646 /* No internal symbols read? Skip this BFD. */
5647 if (isymbuf == NULL)
5650 for (i = 0; i < localsyms; i++)
5652 Elf_Internal_Sym *isym = &isymbuf[i];
5653 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5657 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5659 name = bfd_elf_string_from_elf_section (abfd,
5660 hdr->sh_link, isym->st_name);
5662 if (bfd_is_arm_special_symbol_name (name,
5663 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5664 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5670 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5671 say what they wanted. */
5674 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5676 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5677 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5679 if (globals == NULL)
5682 if (globals->fix_cortex_a8 == -1)
5684 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5685 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5686 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5687 || out_attr[Tag_CPU_arch_profile].i == 0))
5688 globals->fix_cortex_a8 = 1;
5690 globals->fix_cortex_a8 = 0;
5696 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5698 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5699 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5701 if (globals == NULL)
5703 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5704 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5706 switch (globals->vfp11_fix)
5708 case BFD_ARM_VFP11_FIX_DEFAULT:
5709 case BFD_ARM_VFP11_FIX_NONE:
5710 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5714 /* Give a warning, but do as the user requests anyway. */
5715 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5716 "workaround is not necessary for target architecture"), obfd);
5719 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5720 /* For earlier architectures, we might need the workaround, but do not
5721 enable it by default. If the user is running with broken hardware, they
5722 must enable the erratum fix explicitly. */
5723 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5727 enum bfd_arm_vfp11_pipe
5735 /* Return a VFP register number. This is encoded as RX:X for single-precision
5736 registers, or X:RX for double-precision registers, where RX is the group of
5737 four bits in the instruction encoding and X is the single extension bit.
5738 RX and X fields are specified using their lowest (starting) bit. The return value is:
5741 0...31: single-precision registers s0...s31
5742 32...63: double-precision registers d0...d31.
5744 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5745 encounter VFP3 instructions, so we allow the full range for DP registers. */
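/* For example, with RX = 0b0011 and X = 1 the single-precision case
   gives (0b0011 << 1) | 1 = 7, i.e. s7, while the double-precision
   case gives (0b0011 | (1 << 4)) + 32 = 51, i.e. d19. */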
5748 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5752 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5754 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5757 /* Set bits in *WMASK according to a register number REG as encoded by
5758 bfd_arm_vfp11_regno(). Ignore d16-d31. */
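/* A double-precision register marks both of its single-precision
   halves: for example d1 (reg 33) sets bits 2 and 3, corresponding
   to s2 and s3. */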
5761 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5766 *wmask |= 3 << ((reg - 32) * 2);
5769 /* Return TRUE if WMASK overwrites anything in REGS. */
5772 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5776 for (i = 0; i < numregs; i++)
5778 unsigned int reg = regs[i];
5780 if (reg < 32 && (wmask & (1 << reg)) != 0)
5788 if ((wmask & (3 << (reg * 2))) != 0)
5795 /* In this function, we're interested in two things: finding input registers
5796 for VFP data-processing instructions, and finding the set of registers which
5797 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5798 hold the written set, so FLDM etc. are easy to deal with (we're only
5799 interested in 32 SP registers or 16 dp registers, due to the VFP version
5800 implemented by the chip in question). DP registers are marked by setting
5801 both SP registers in the write mask. */
5803 static enum bfd_arm_vfp11_pipe
5804 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5807 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
5808 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5810 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5813 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5814 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5816 pqrs = ((insn & 0x00800000) >> 20)
5817 | ((insn & 0x00300000) >> 19)
5818 | ((insn & 0x00000040) >> 6);
5822 case 0: /* fmac[sd]. */
5823 case 1: /* fnmac[sd]. */
5824 case 2: /* fmsc[sd]. */
5825 case 3: /* fnmsc[sd]. */
5827 bfd_arm_vfp11_write_mask (destmask, fd);
5829 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5834 case 4: /* fmul[sd]. */
5835 case 5: /* fnmul[sd]. */
5836 case 6: /* fadd[sd]. */
5837 case 7: /* fsub[sd]. */
5841 case 8: /* fdiv[sd]. */
5844 bfd_arm_vfp11_write_mask (destmask, fd);
5845 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5850 case 15: /* extended opcode. */
5852 unsigned int extn = ((insn >> 15) & 0x1e)
5853 | ((insn >> 7) & 1);
5857 case 0: /* fcpy[sd]. */
5858 case 1: /* fabs[sd]. */
5859 case 2: /* fneg[sd]. */
5860 case 8: /* fcmp[sd]. */
5861 case 9: /* fcmpe[sd]. */
5862 case 10: /* fcmpz[sd]. */
5863 case 11: /* fcmpez[sd]. */
5864 case 16: /* fuito[sd]. */
5865 case 17: /* fsito[sd]. */
5866 case 24: /* ftoui[sd]. */
5867 case 25: /* ftouiz[sd]. */
5868 case 26: /* ftosi[sd]. */
5869 case 27: /* ftosiz[sd]. */
5870 /* These instructions will not bounce due to underflow. */
5875 case 3: /* fsqrt[sd]. */
5876 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5877 registers to cause the erratum in previous instructions. */
5878 bfd_arm_vfp11_write_mask (destmask, fd);
5882 case 15: /* fcvt{ds,sd}. */
5886 bfd_arm_vfp11_write_mask (destmask, fd);
5888 /* Only FCVTSD can underflow. */
5889 if ((insn & 0x100) != 0)
5908 /* Two-register transfer. */
5909 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5911 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5913 if ((insn & 0x100000) == 0)
5916 bfd_arm_vfp11_write_mask (destmask, fm);
5919 bfd_arm_vfp11_write_mask (destmask, fm);
5920 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5926 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5928 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5929 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5933 case 0: /* Two-reg transfer. We should catch these above. */
5936 case 2: /* fldm[sdx]. */
5940 unsigned int i, offset = insn & 0xff;
5945 for (i = fd; i < fd + offset; i++)
5946 bfd_arm_vfp11_write_mask (destmask, i);
5950 case 4: /* fld[sd]. */
5952 bfd_arm_vfp11_write_mask (destmask, fd);
5961 /* Single-register transfer. Note L==0. */
5962 else if ((insn & 0x0f100e10) == 0x0e000a10)
5964 unsigned int opcode = (insn >> 21) & 7;
5965 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5969 case 0: /* fmsr/fmdlr. */
5970 case 1: /* fmdhr. */
5971 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5972 destination register. I don't know if this is exactly right,
5973 but it is the conservative choice. */
5974 bfd_arm_vfp11_write_mask (destmask, fn);
5988 static int elf32_arm_compare_mapping (const void * a, const void * b);
5991 /* Look for potentially-troublesome code sequences which might trigger the
5992 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5993 (available from ARM) for details of the erratum. A short version is
5994 described in ld.texinfo. */
5997 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
6000 bfd_byte *contents = NULL;
6002 int regs[3], numregs = 0;
6003 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6004 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6006 if (globals == NULL)
6009 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6010 The states transition as follows:
6012 0 -> 1 (vector) or 0 -> 2 (scalar)
6013 A VFP FMAC-pipeline instruction has been seen. Fill
6014 regs[0]..regs[numregs-1] with its input operands. Remember this
6015 instruction in 'first_fmac'.
6017 1 -> 2
6018 Any instruction, except for a VFP instruction which overwrites
6019 regs[*].
6021 1 -> 3 [ -> 0 ] or
6022 2 -> 3 [ -> 0 ]
6023 A VFP instruction has been seen which overwrites any of regs[*].
6024 We must make a veneer! Reset state to 0 before examining next
6025 instruction.
6027 2 -> 0 [ -> 2 ]
6028 If we fail to match anything in state 2, reset to state 0 and reset
6029 the instruction pointer to the instruction after 'first_fmac'.
6031 If the VFP11 vector mode is in use, there must be at least two unrelated
6032 instructions between anti-dependent VFP11 instructions to properly avoid
6033 triggering the erratum, hence the use of the extra state 1. */
6035 /* If we are only performing a partial link do not bother
6036 to construct any glue. */
6037 if (link_info->relocatable)
6040 /* Skip if this bfd does not correspond to an ELF image. */
6041 if (! is_arm_elf (abfd))
6044 /* We should have chosen a fix type by the time we get here. */
6045 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6047 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6050 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6051 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6054 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6056 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6057 struct _arm_elf_section_data *sec_data;
6059 /* If we don't have executable progbits, we're not interested in this
6060 section. Also skip if section is to be excluded. */
6061 if (elf_section_type (sec) != SHT_PROGBITS
6062 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6063 || (sec->flags & SEC_EXCLUDE) != 0
6064 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
6065 || sec->output_section == bfd_abs_section_ptr
6066 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6069 sec_data = elf32_arm_section_data (sec);
6071 if (sec_data->mapcount == 0)
6074 if (elf_section_data (sec)->this_hdr.contents != NULL)
6075 contents = elf_section_data (sec)->this_hdr.contents;
6076 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6079 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6080 elf32_arm_compare_mapping);
6082 for (span = 0; span < sec_data->mapcount; span++)
6084 unsigned int span_start = sec_data->map[span].vma;
6085 unsigned int span_end = (span == sec_data->mapcount - 1)
6086 ? sec->size : sec_data->map[span + 1].vma;
6087 char span_type = sec_data->map[span].type;
6089 /* FIXME: Only ARM mode is supported at present. We may need to
6090 support Thumb-2 mode also at some point. */
6091 if (span_type != 'a')
6094 for (i = span_start; i < span_end;)
6096 unsigned int next_i = i + 4;
6097 unsigned int insn = bfd_big_endian (abfd)
6098 ? (contents[i] << 24)
6099 | (contents[i + 1] << 16)
6100 | (contents[i + 2] << 8)
6102 : (contents[i + 3] << 24)
6103 | (contents[i + 2] << 16)
6104 | (contents[i + 1] << 8)
6106 unsigned int writemask = 0;
6107 enum bfd_arm_vfp11_pipe vpipe;
6112 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6114 /* I'm assuming the VFP11 erratum can trigger with denorm
6115 operands on either the FMAC or the DS pipeline. This might
6116 lead to slightly overenthusiastic veneer insertion. */
6117 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6119 state = use_vector ? 1 : 2;
6121 veneer_of_insn = insn;
6127 int other_regs[3], other_numregs;
6128 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6131 if (vpipe != VFP11_BAD
6132 && bfd_arm_vfp11_antidependency (writemask, regs,
6142 int other_regs[3], other_numregs;
6143 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6146 if (vpipe != VFP11_BAD
6147 && bfd_arm_vfp11_antidependency (writemask, regs,
6153 next_i = first_fmac + 4;
6159 abort (); /* Should be unreachable. */
6164 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
6165 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6167 elf32_arm_section_data (sec)->erratumcount += 1;
6169 newerr->u.b.vfp_insn = veneer_of_insn;
6174 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6181 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6186 newerr->next = sec_data->erratumlist;
6187 sec_data->erratumlist = newerr;
6196 if (contents != NULL
6197 && elf_section_data (sec)->this_hdr.contents != contents)
6205 if (contents != NULL
6206 && elf_section_data (sec)->this_hdr.contents != contents)
6212 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6213 after sections have been laid out, using specially-named symbols. */
6216 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6217 struct bfd_link_info *link_info)
6220 struct elf32_arm_link_hash_table *globals;
6223 if (link_info->relocatable)
6226 /* Skip if this bfd does not correspond to an ELF image. */
6227 if (! is_arm_elf (abfd))
6230 globals = elf32_arm_hash_table (link_info);
6231 if (globals == NULL)
6234 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6235 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6237 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6239 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6240 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6242 for (; errnode != NULL; errnode = errnode->next)
6244 struct elf_link_hash_entry *myh;
6247 switch (errnode->type)
6249 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6250 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6251 /* Find veneer symbol. */
6252 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6253 errnode->u.b.veneer->u.v.id);
6255 myh = elf_link_hash_lookup
6256 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6259 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6260 "`%s'"), abfd, tmp_name);
6262 vma = myh->root.u.def.section->output_section->vma
6263 + myh->root.u.def.section->output_offset
6264 + myh->root.u.def.value;
6266 errnode->u.b.veneer->vma = vma;
6269 case VFP11_ERRATUM_ARM_VENEER:
6270 case VFP11_ERRATUM_THUMB_VENEER:
6271 /* Find return location. */
6272 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6275 myh = elf_link_hash_lookup
6276 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6279 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6280 "`%s'"), abfd, tmp_name);
6282 vma = myh->root.u.def.section->output_section->vma
6283 + myh->root.u.def.section->output_offset
6284 + myh->root.u.def.value;
6286 errnode->u.v.branch->vma = vma;
6299 /* Set target relocation values needed during linking. */
6302 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6303 struct bfd_link_info *link_info,
6305 char * target2_type,
6308 bfd_arm_vfp11_fix vfp11_fix,
6309 int no_enum_warn, int no_wchar_warn,
6310 int pic_veneer, int fix_cortex_a8)
6312 struct elf32_arm_link_hash_table *globals;
6314 globals = elf32_arm_hash_table (link_info);
6315 if (globals == NULL)
6318 globals->target1_is_rel = target1_is_rel;
6319 if (strcmp (target2_type, "rel") == 0)
6320 globals->target2_reloc = R_ARM_REL32;
6321 else if (strcmp (target2_type, "abs") == 0)
6322 globals->target2_reloc = R_ARM_ABS32;
6323 else if (strcmp (target2_type, "got-rel") == 0)
6324 globals->target2_reloc = R_ARM_GOT_PREL;
6327 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6330 globals->fix_v4bx = fix_v4bx;
6331 globals->use_blx |= use_blx;
6332 globals->vfp11_fix = vfp11_fix;
6333 globals->pic_veneer = pic_veneer;
6334 globals->fix_cortex_a8 = fix_cortex_a8;
6336 BFD_ASSERT (is_arm_elf (output_bfd));
6337 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6338 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6341 /* Replace the target offset of a Thumb bl or b.w instruction. */
6344 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6350 BFD_ASSERT ((offset & 1) == 0);
6352 upper = bfd_get_16 (abfd, insn);
6353 lower = bfd_get_16 (abfd, insn + 2);
6354 reloc_sign = (offset < 0) ? 1 : 0;
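/* The Thumb-2 BL/B.W immediate is S:I1:I2:imm10:imm11 in halfword
   units, with J1 = NOT(I1) XOR S and J2 = NOT(I2) XOR S stored in the
   second halfword; the expressions below rebuild those fields from
   the byte offset. */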
6355 upper = (upper & ~(bfd_vma) 0x7ff)
6356 | ((offset >> 12) & 0x3ff)
6357 | (reloc_sign << 10);
6358 lower = (lower & ~(bfd_vma) 0x2fff)
6359 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6360 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6361 | ((offset >> 1) & 0x7ff);
6362 bfd_put_16 (abfd, upper, insn);
6363 bfd_put_16 (abfd, lower, insn + 2);
6366 /* Thumb code calling an ARM function. */
6369 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6373 asection * input_section,
6374 bfd_byte * hit_data,
6377 bfd_signed_vma addend,
6379 char **error_message)
6383 long int ret_offset;
6384 struct elf_link_hash_entry * myh;
6385 struct elf32_arm_link_hash_table * globals;
6387 myh = find_thumb_glue (info, name, error_message);
6391 globals = elf32_arm_hash_table (info);
6392 BFD_ASSERT (globals != NULL);
6393 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6395 my_offset = myh->root.u.def.value;
6397 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6398 THUMB2ARM_GLUE_SECTION_NAME);
6400 BFD_ASSERT (s != NULL);
6401 BFD_ASSERT (s->contents != NULL);
6402 BFD_ASSERT (s->output_section != NULL);
6404 if ((my_offset & 0x01) == 0x01)
6407 && sym_sec->owner != NULL
6408 && !INTERWORK_FLAG (sym_sec->owner))
6410 (*_bfd_error_handler)
6411 (_("%B(%s): warning: interworking not enabled.\n"
6412 " first occurrence: %B: thumb call to arm"),
6413 sym_sec->owner, input_bfd, name);
6419 myh->root.u.def.value = my_offset;
6421 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6422 s->contents + my_offset);
6424 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6425 s->contents + my_offset + 2);
6428 /* Address of destination of the stub. */
6429 ((bfd_signed_vma) val)
6431 /* Offset from the start of the current section
6432 to the start of the stubs. */
6434 /* Offset of the start of this stub from the start of the stubs. */
6436 /* Address of the start of the current section. */
6437 + s->output_section->vma)
6438 /* The branch instruction is 4 bytes into the stub. */
6440 /* ARM branches work from the pc of the instruction + 8. */
6443 put_arm_insn (globals, output_bfd,
6444 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6445 s->contents + my_offset + 4);
6448 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6450 /* Now go back and fix up the original BL insn to point to here. */
6452 /* Address of where the stub is located. */
6453 (s->output_section->vma + s->output_offset + my_offset)
6454 /* Address of where the BL is located. */
6455 - (input_section->output_section->vma + input_section->output_offset
6457 /* Addend in the relocation. */
6459 /* Biasing for PC-relative addressing. */
6462 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6467 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6469 static struct elf_link_hash_entry *
6470 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6477 char ** error_message)
6480 long int ret_offset;
6481 struct elf_link_hash_entry * myh;
6482 struct elf32_arm_link_hash_table * globals;
6484 myh = find_arm_glue (info, name, error_message);
6488 globals = elf32_arm_hash_table (info);
6489 BFD_ASSERT (globals != NULL);
6490 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6492 my_offset = myh->root.u.def.value;
6494 if ((my_offset & 0x01) == 0x01)
6497 && sym_sec->owner != NULL
6498 && !INTERWORK_FLAG (sym_sec->owner))
6500 (*_bfd_error_handler)
6501 (_("%B(%s): warning: interworking not enabled.\n"
6502 " first occurrence: %B: arm call to thumb"),
6503 sym_sec->owner, input_bfd, name);
6507 myh->root.u.def.value = my_offset;
6509 if (info->shared || globals->root.is_relocatable_executable
6510 || globals->pic_veneer)
6512 /* For relocatable objects we can't use absolute addresses,
6513 so construct the address from a relative offset. */
6514 /* TODO: If the offset is small it's probably worth
6515 constructing the address with adds. */
6516 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6517 s->contents + my_offset);
6518 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6519 s->contents + my_offset + 4);
6520 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6521 s->contents + my_offset + 8);
6522 /* Adjust the offset by 4 for the position of the add,
6523 and 8 for the pipeline offset. */
6524 ret_offset = (val - (s->output_offset
6525 + s->output_section->vma
6528 bfd_put_32 (output_bfd, ret_offset,
6529 s->contents + my_offset + 12);
6531 else if (globals->use_blx)
6533 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6534 s->contents + my_offset);
6536 /* It's a thumb address. Add the low order bit. */
6537 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6538 s->contents + my_offset + 4);
6542 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6543 s->contents + my_offset);
6545 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6546 s->contents + my_offset + 4);
6548 /* It's a thumb address. Add the low order bit. */
6549 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6550 s->contents + my_offset + 8);
6556 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6561 /* Arm code calling a Thumb function. */
6564 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6568 asection * input_section,
6569 bfd_byte * hit_data,
6572 bfd_signed_vma addend,
6574 char **error_message)
6576 unsigned long int tmp;
6579 long int ret_offset;
6580 struct elf_link_hash_entry * myh;
6581 struct elf32_arm_link_hash_table * globals;
6583 globals = elf32_arm_hash_table (info);
6584 BFD_ASSERT (globals != NULL);
6585 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6587 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6588 ARM2THUMB_GLUE_SECTION_NAME);
6589 BFD_ASSERT (s != NULL);
6590 BFD_ASSERT (s->contents != NULL);
6591 BFD_ASSERT (s->output_section != NULL);
6593 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6594 sym_sec, val, s, error_message);
6598 my_offset = myh->root.u.def.value;
6599 tmp = bfd_get_32 (input_bfd, hit_data);
6600 tmp = tmp & 0xFF000000;
6602 /* The branch offset is encoded relative to the PC of the BL plus 8 (the ARM pipeline offset), hence the subtraction of 8. */
6603 ret_offset = (s->output_offset
6605 + s->output_section->vma
6606 - (input_section->output_offset
6607 + input_section->output_section->vma
6611 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6613 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6618 /* Populate Arm stub for an exported Thumb function. */
6621 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6623 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6625 struct elf_link_hash_entry * myh;
6626 struct elf32_arm_link_hash_entry *eh;
6627 struct elf32_arm_link_hash_table * globals;
6630 char *error_message;
6632 eh = elf32_arm_hash_entry (h);
6633 /* Allocate stubs for exported Thumb functions on v4t. */
6634 if (eh->export_glue == NULL)
6637 globals = elf32_arm_hash_table (info);
6638 BFD_ASSERT (globals != NULL);
6639 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6641 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6642 ARM2THUMB_GLUE_SECTION_NAME);
6643 BFD_ASSERT (s != NULL);
6644 BFD_ASSERT (s->contents != NULL);
6645 BFD_ASSERT (s->output_section != NULL);
6647 sec = eh->export_glue->root.u.def.section;
6649 BFD_ASSERT (sec->output_section != NULL);
6651 val = eh->export_glue->root.u.def.value + sec->output_offset
6652 + sec->output_section->vma;
6654 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6655 h->root.u.def.section->owner,
6656 globals->obfd, sec, val, s,
6662 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
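/* A note on the veneer contents, inferred from the instruction templates
   used below (armbx1_tst_insn, armbx2_moveq_insn, armbx3_bx_insn): each
   veneer is three ARM instructions of the form

       tst   rN, #1       @ test the Thumb bit of the target address
       moveq pc, rN       @ ARM-state target: plain register branch
       bx    rN           @ Thumb-state target: interworking branch

   where rN is the register operand of the original BX instruction.  */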
6665 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6670 struct elf32_arm_link_hash_table *globals;
6672 globals = elf32_arm_hash_table (info);
6673 BFD_ASSERT (globals != NULL);
6674 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6676 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6677 ARM_BX_GLUE_SECTION_NAME);
6678 BFD_ASSERT (s != NULL);
6679 BFD_ASSERT (s->contents != NULL);
6680 BFD_ASSERT (s->output_section != NULL);
6682 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6684 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6686 if ((globals->bx_glue_offset[reg] & 1) == 0)
6688 p = s->contents + glue_addr;
6689 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6690 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6691 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6692 globals->bx_glue_offset[reg] |= 1;
6695 return glue_addr + s->output_section->vma + s->output_offset;
6698 /* Generate Arm stubs for exported Thumb symbols. */
6700 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6701 struct bfd_link_info *link_info)
6703 struct elf32_arm_link_hash_table * globals;
6705 if (link_info == NULL)
6706 /* Ignore this if we are not called by the ELF backend linker. */
6709 globals = elf32_arm_hash_table (link_info);
6710 if (globals == NULL)
6713 /* If blx is available then exported Thumb symbols are OK and there is nothing to do. */
6715 if (globals->use_blx)
6718 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6722 /* Some relocations map to different relocations depending on the
6723 target. Return the real relocation. */
6726 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6732 if (globals->target1_is_rel)
6738 return globals->target2_reloc;
6745 /* Return the base VMA address which should be subtracted from real addresses
6746 when resolving @dtpoff relocation.
6747 This is PT_TLS segment p_vaddr. */
6750 dtpoff_base (struct bfd_link_info *info)
6752 /* If tls_sec is NULL, we should have signalled an error already. */
6753 if (elf_hash_table (info)->tls_sec == NULL)
6755 return elf_hash_table (info)->tls_sec->vma;
6758 /* Return the relocation value for @tpoff relocation
6759 if STT_TLS virtual address is ADDRESS. */
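/* Illustrative example: ARM uses TLS variant 1, so with an 8-byte TCB
   (TCB_SIZE) and a PT_TLS segment aligned to 4 bytes, BASE is 8 and a
   variable at the very start of the TLS segment resolves to a @tpoff of 8
   from the thread pointer.  The exact value of BASE depends on the
   segment's alignment, as computed below with align_power.  */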
6762 tpoff (struct bfd_link_info *info, bfd_vma address)
6764 struct elf_link_hash_table *htab = elf_hash_table (info);
6767 /* If tls_sec is NULL, we should have signalled an error already. */
6768 if (htab->tls_sec == NULL)
6770 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6771 return address - htab->tls_sec->vma + base;
6774 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6775 VALUE is the relocation value. */
6777 static bfd_reloc_status_type
6778 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6781 return bfd_reloc_overflow;
6783 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6784 bfd_put_32 (abfd, value, data);
6785 return bfd_reloc_ok;
6788 /* For a given value of n, calculate the value of G_n as required to
6789 deal with group relocations. We return it in the form of an
6790 encoded constant-and-rotation, together with the final residual. If n is
6791 specified as less than zero, then final_residual is filled with the
6792 input value and no further action is performed. */
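/* Illustrative example: splitting the value 0x12345 yields
     G_0 = 0x12000 (the 0xff << 10 slice), residual 0x345,
     G_1 = 0x00344 (the 0xff << 2 slice),  residual 0x001,
   each G_n being an 8-bit constant at an even rotation, i.e. encodable as
   an ARM data-processing immediate.  */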
6795 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6799 bfd_vma encoded_g_n = 0;
6800 bfd_vma residual = value; /* Also known as Y_n. */
6802 for (current_n = 0; current_n <= n; current_n++)
6806 /* Calculate which part of the value to mask. */
6813 /* Determine the most significant bit in the residual and
6814 align the resulting value to a 2-bit boundary. */
6815 for (msb = 30; msb >= 0; msb -= 2)
6816 if (residual & (3 << msb))
6819 /* The desired shift is now (msb - 6), or zero, whichever is the greater. */
6826 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6827 g_n = residual & (0xff << shift);
6828 encoded_g_n = (g_n >> shift)
6829 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6831 /* Calculate the residual for the next time around. */
6835 *final_residual = residual;
6840 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6841 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
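/* The mask 0x1e00000 used below selects the data-processing opcode field
   (bits 21-24): ADD is 0b0100 (1 << 23) and SUB is 0b0010 (1 << 22).  */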
6844 identify_add_or_sub (bfd_vma insn)
6846 int opcode = insn & 0x1e00000;
6848 if (opcode == 1 << 23) /* ADD */
6851 if (opcode == 1 << 22) /* SUB */
6857 /* Perform a relocation as part of a final link. */
6859 static bfd_reloc_status_type
6860 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6863 asection * input_section,
6864 bfd_byte * contents,
6865 Elf_Internal_Rela * rel,
6867 struct bfd_link_info * info,
6869 const char * sym_name,
6871 struct elf_link_hash_entry * h,
6872 bfd_boolean * unresolved_reloc_p,
6873 char ** error_message)
6875 unsigned long r_type = howto->type;
6876 unsigned long r_symndx;
6877 bfd_byte * hit_data = contents + rel->r_offset;
6878 bfd * dynobj = NULL;
6879 bfd_vma * local_got_offsets;
6880 asection * sgot = NULL;
6881 asection * splt = NULL;
6882 asection * sreloc = NULL;
6884 bfd_signed_vma signed_addend;
6885 struct elf32_arm_link_hash_table * globals;
6887 globals = elf32_arm_hash_table (info);
6888 if (globals == NULL)
6889 return bfd_reloc_notsupported;
6891 BFD_ASSERT (is_arm_elf (input_bfd));
6893 /* Some relocation types map to different relocations depending on the
6894 target. We pick the right one here. */
6895 r_type = arm_real_reloc_type (globals, r_type);
6896 if (r_type != howto->type)
6897 howto = elf32_arm_howto_from_type (r_type);
6899 /* If the start address has been set, then set the EF_ARM_HASENTRY
6900 flag. Setting this more than once is redundant, but the cost is
6901 not too high, and it keeps the code simple.
6903 The test is done here, rather than somewhere else, because the
6904 start address is only set just before the final link commences.
6906 Note - if the user deliberately sets a start address of 0, the
6907 flag will not be set. */
6908 if (bfd_get_start_address (output_bfd) != 0)
6909 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6911 dynobj = elf_hash_table (info)->dynobj;
6914 sgot = bfd_get_section_by_name (dynobj, ".got");
6915 splt = bfd_get_section_by_name (dynobj, ".plt");
6917 local_got_offsets = elf_local_got_offsets (input_bfd);
6918 r_symndx = ELF32_R_SYM (rel->r_info);
6920 if (globals->use_rel)
6922 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6924 if (addend & ((howto->src_mask + 1) >> 1))
6927 signed_addend &= ~ howto->src_mask;
6928 signed_addend |= addend;
6931 signed_addend = addend;
6934 addend = signed_addend = rel->r_addend;
6939 /* We don't need to find a value for this symbol. It's just a marker. */
6941 *unresolved_reloc_p = FALSE;
6942 return bfd_reloc_ok;
6945 if (!globals->vxworks_p)
6946 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6950 case R_ARM_ABS32_NOI:
6952 case R_ARM_REL32_NOI:
6958 /* Handle relocations which should use the PLT entry. ABS32/REL32
6959 will use the symbol's value, which may point to a PLT entry, but we
6960 don't need to handle that here. If we created a PLT entry, all
6961 branches in this object should go to it, except if the PLT is too
6962 far away, in which case a long branch stub should be inserted. */
6963 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6964 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6965 && r_type != R_ARM_CALL
6966 && r_type != R_ARM_JUMP24
6967 && r_type != R_ARM_PLT32)
6970 && h->plt.offset != (bfd_vma) -1)
6972 /* If we've created a .plt section, and assigned a PLT entry to
6973 this function, it should not be known to bind locally. If
6974 it were, we would have cleared the PLT entry. */
6975 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6977 value = (splt->output_section->vma
6978 + splt->output_offset
6980 *unresolved_reloc_p = FALSE;
6981 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6982 contents, rel->r_offset, value,
6986 /* When generating a shared object or relocatable executable, these
6987 relocations are copied into the output file to be resolved at
6989 if ((info->shared || globals->root.is_relocatable_executable)
6990 && (input_section->flags & SEC_ALLOC)
6991 && !(globals->vxworks_p
6992 && strcmp (input_section->output_section->name,
6994 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6995 || !SYMBOL_CALLS_LOCAL (info, h))
6996 && (!strstr (input_section->name, STUB_SUFFIX))
6998 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6999 || h->root.type != bfd_link_hash_undefweak)
7000 && r_type != R_ARM_PC24
7001 && r_type != R_ARM_CALL
7002 && r_type != R_ARM_JUMP24
7003 && r_type != R_ARM_PREL31
7004 && r_type != R_ARM_PLT32)
7006 Elf_Internal_Rela outrel;
7008 bfd_boolean skip, relocate;
7010 *unresolved_reloc_p = FALSE;
7014 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
7015 ! globals->use_rel);
7018 return bfd_reloc_notsupported;
7024 outrel.r_addend = addend;
7026 _bfd_elf_section_offset (output_bfd, info, input_section,
7028 if (outrel.r_offset == (bfd_vma) -1)
7030 else if (outrel.r_offset == (bfd_vma) -2)
7031 skip = TRUE, relocate = TRUE;
7032 outrel.r_offset += (input_section->output_section->vma
7033 + input_section->output_offset);
7036 memset (&outrel, 0, sizeof outrel);
7041 || !h->def_regular))
7042 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
7047 /* This symbol is local, or marked to become local. */
7048 if (sym_flags == STT_ARM_TFUNC)
7050 if (globals->symbian_p)
7054 /* On Symbian OS, the data segment and text segment
7055 can be relocated independently. Therefore, we
7056 must indicate the segment to which this
7057 relocation is relative. The BPABI allows us to
7058 use any symbol in the right segment; we just use
7059 the section symbol as it is convenient. (We
7060 cannot use the symbol given by "h" directly as it
7061 will not appear in the dynamic symbol table.)
7063 Note that the dynamic linker ignores the section
7064 symbol value, so we don't subtract osec->vma
7065 from the emitted reloc addend. */
7067 osec = sym_sec->output_section;
7069 osec = input_section->output_section;
7070 symbol = elf_section_data (osec)->dynindx;
7073 struct elf_link_hash_table *htab = elf_hash_table (info);
7075 if ((osec->flags & SEC_READONLY) == 0
7076 && htab->data_index_section != NULL)
7077 osec = htab->data_index_section;
7079 osec = htab->text_index_section;
7080 symbol = elf_section_data (osec)->dynindx;
7082 BFD_ASSERT (symbol != 0);
7085 /* On SVR4-ish systems, the dynamic loader cannot
7086 relocate the text and data segments independently,
7087 so the symbol does not matter. */
7089 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
7090 if (globals->use_rel)
7093 outrel.r_addend += value;
7096 loc = sreloc->contents;
7097 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
7098 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7100 /* If this reloc is against an external symbol, we do not want to
7101 fiddle with the addend. Otherwise, we need to include the symbol
7102 value so that it becomes an addend for the dynamic reloc. */
7104 return bfd_reloc_ok;
7106 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7107 contents, rel->r_offset, value,
7110 else switch (r_type)
7113 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7115 case R_ARM_XPC25: /* Arm BLX instruction. */
7118 case R_ARM_PC24: /* Arm B/BL instruction. */
7121 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7123 if (r_type == R_ARM_XPC25)
7125 /* Check for Arm calling Arm function. */
7126 /* FIXME: Should we translate the instruction into a BL
7127 instruction instead ? */
7128 if (sym_flags != STT_ARM_TFUNC)
7129 (*_bfd_error_handler)
7130 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7132 h ? h->root.root.string : "(local)");
7134 else if (r_type == R_ARM_PC24)
7136 /* Check for Arm calling Thumb function. */
7137 if (sym_flags == STT_ARM_TFUNC)
7139 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7140 output_bfd, input_section,
7141 hit_data, sym_sec, rel->r_offset,
7142 signed_addend, value,
7144 return bfd_reloc_ok;
7146 return bfd_reloc_dangerous;
7150 /* Check if a stub has to be inserted because the
7151 destination is too far or we are changing mode. */
7152 if ( r_type == R_ARM_CALL
7153 || r_type == R_ARM_JUMP24
7154 || r_type == R_ARM_PLT32)
7156 enum elf32_arm_stub_type stub_type = arm_stub_none;
7157 struct elf32_arm_link_hash_entry *hash;
7159 hash = (struct elf32_arm_link_hash_entry *) h;
7160 stub_type = arm_type_of_stub (info, input_section, rel,
7163 input_bfd, sym_name);
7165 if (stub_type != arm_stub_none)
7167 /* The target is out of reach, so redirect the
7168 branch to the local stub for this function. */
7170 stub_entry = elf32_arm_get_stub_entry (input_section,
7174 if (stub_entry != NULL)
7175 value = (stub_entry->stub_offset
7176 + stub_entry->stub_sec->output_offset
7177 + stub_entry->stub_sec->output_section->vma);
7181 /* If the call goes through a PLT entry, make sure to
7182 check distance to the right destination address. */
7185 && h->plt.offset != (bfd_vma) -1)
7187 value = (splt->output_section->vma
7188 + splt->output_offset
7190 *unresolved_reloc_p = FALSE;
7191 /* The PLT entry is in ARM mode, regardless of the
7193 sym_flags = STT_FUNC;
7198 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7200 S is the address of the symbol in the relocation.
7201 P is address of the instruction being relocated.
7202 A is the addend (extracted from the instruction) in bytes.
7204 S is held in 'value'.
7205 P is the base address of the section containing the
7206 instruction plus the offset of the reloc into that
7208 (input_section->output_section->vma +
7209 input_section->output_offset +
7211 A is the addend, converted into bytes, ie:
7214 Note: None of these operations have knowledge of the pipeline
7215 size of the processor, thus it is up to the assembler to
7216 encode this information into the addend. */
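/* Worked example (illustrative): a BL at P = 0x8000 targeting S = 0x9000
   with addend A = -8 gives S - P + A = 0xff8; after the 2-bit right shift
   the 24-bit offset field holds 0x3fe, and at run time the processor
   computes 0x8000 + 8 + (0x3fe << 2) = 0x9000.  */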
7217 value -= (input_section->output_section->vma
7218 + input_section->output_offset);
7219 value -= rel->r_offset;
7220 if (globals->use_rel)
7221 value += (signed_addend << howto->size);
7223 /* RELA addends do not have to be adjusted by howto->size. */
7224 value += signed_addend;
7226 signed_addend = value;
7227 signed_addend >>= howto->rightshift;
7229 /* A branch to an undefined weak symbol is turned into a jump to
7230 the next instruction unless a PLT entry will be created.
7231 Do the same for local undefined symbols (but not for STN_UNDEF).
7232 The jump to the next instruction is optimized as a NOP depending
7233 on the architecture. */
7234 if (h ? (h->root.type == bfd_link_hash_undefweak
7235 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7236 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
7238 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7240 if (arch_has_arm_nop (globals))
7241 value |= 0x0320f000;
7243 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7247 /* Perform a signed range check. */
7248 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7249 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7250 return bfd_reloc_overflow;
7252 addend = (value & 2);
7254 value = (signed_addend & howto->dst_mask)
7255 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7257 if (r_type == R_ARM_CALL)
7259 /* Set the H bit in the BLX instruction. */
7260 if (sym_flags == STT_ARM_TFUNC)
7265 value &= ~(bfd_vma)(1 << 24);
7268 /* Select the correct instruction (BL or BLX). */
7269 /* Only if we are not handling a BL to a stub. In this
7270 case, mode switching is performed by the stub. */
7271 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7275 value &= ~(bfd_vma)(1 << 28);
7285 if (sym_flags == STT_ARM_TFUNC)
7289 case R_ARM_ABS32_NOI:
7295 if (sym_flags == STT_ARM_TFUNC)
7297 value -= (input_section->output_section->vma
7298 + input_section->output_offset + rel->r_offset);
7301 case R_ARM_REL32_NOI:
7303 value -= (input_section->output_section->vma
7304 + input_section->output_offset + rel->r_offset);
7308 value -= (input_section->output_section->vma
7309 + input_section->output_offset + rel->r_offset);
7310 value += signed_addend;
7311 if (! h || h->root.type != bfd_link_hash_undefweak)
7313 /* Check for overflow. */
7314 if ((value ^ (value >> 1)) & (1 << 30))
7315 return bfd_reloc_overflow;
7317 value &= 0x7fffffff;
7318 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7319 if (sym_flags == STT_ARM_TFUNC)
7324 bfd_put_32 (input_bfd, value, hit_data);
7325 return bfd_reloc_ok;
7330 /* There is no way to tell whether the user intended to use a signed or
7331 unsigned addend. When checking for overflow we accept either,
7332 as specified by the AAELF. */
7333 if ((long) value > 0xff || (long) value < -0x80)
7334 return bfd_reloc_overflow;
7336 bfd_put_8 (input_bfd, value, hit_data);
7337 return bfd_reloc_ok;
7342 /* See comment for R_ARM_ABS8. */
7343 if ((long) value > 0xffff || (long) value < -0x8000)
7344 return bfd_reloc_overflow;
7346 bfd_put_16 (input_bfd, value, hit_data);
7347 return bfd_reloc_ok;
7349 case R_ARM_THM_ABS5:
7350 /* Support ldr and str instructions for the thumb. */
7351 if (globals->use_rel)
7353 /* Need to refetch addend. */
7354 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7355 /* ??? Need to determine shift amount from operand size. */
7356 addend >>= howto->rightshift;
7360 /* ??? Isn't value unsigned? */
7361 if ((long) value > 0x1f || (long) value < -0x10)
7362 return bfd_reloc_overflow;
7364 /* ??? Value needs to be properly shifted into place first. */
7365 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7366 bfd_put_16 (input_bfd, value, hit_data);
7367 return bfd_reloc_ok;
7369 case R_ARM_THM_ALU_PREL_11_0:
7370 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7373 bfd_signed_vma relocation;
7375 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7376 | bfd_get_16 (input_bfd, hit_data + 2);
7378 if (globals->use_rel)
7380 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7381 | ((insn & (1 << 26)) >> 15);
7382 if (insn & 0xf00000)
7383 signed_addend = -signed_addend;
7386 relocation = value + signed_addend;
7387 relocation -= (input_section->output_section->vma
7388 + input_section->output_offset
7391 value = abs (relocation);
7393 if (value >= 0x1000)
7394 return bfd_reloc_overflow;
7396 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7397 | ((value & 0x700) << 4)
7398 | ((value & 0x800) << 15);
7402 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7403 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7405 return bfd_reloc_ok;
7409 /* PR 10073: This reloc is not generated by the GNU toolchain,
7410 but it is supported for compatibility with third party libraries
7411 generated by other compilers, specifically the ARM/IAR. */
7414 bfd_signed_vma relocation;
7416 insn = bfd_get_16 (input_bfd, hit_data);
7418 if (globals->use_rel)
7419 addend = (insn & 0x00ff) << 2;
7421 relocation = value + addend;
7422 relocation -= (input_section->output_section->vma
7423 + input_section->output_offset
7426 value = abs (relocation);
7428 /* We do not check for overflow of this reloc. Although strictly
7429 speaking this is incorrect, it appears to be necessary in order
7430 to work with IAR generated relocs. Since GCC and GAS do not
7431 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7432 a problem for them. */
7435 insn = (insn & 0xff00) | (value >> 2);
7437 bfd_put_16 (input_bfd, insn, hit_data);
7439 return bfd_reloc_ok;
7442 case R_ARM_THM_PC12:
7443 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7446 bfd_signed_vma relocation;
7448 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7449 | bfd_get_16 (input_bfd, hit_data + 2);
7451 if (globals->use_rel)
7453 signed_addend = insn & 0xfff;
7454 if (!(insn & (1 << 23)))
7455 signed_addend = -signed_addend;
7458 relocation = value + signed_addend;
7459 relocation -= (input_section->output_section->vma
7460 + input_section->output_offset
7463 value = abs (relocation);
7465 if (value >= 0x1000)
7466 return bfd_reloc_overflow;
7468 insn = (insn & 0xff7ff000) | value;
7469 if (relocation >= 0)
7472 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7473 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7475 return bfd_reloc_ok;
7478 case R_ARM_THM_XPC22:
7479 case R_ARM_THM_CALL:
7480 case R_ARM_THM_JUMP24:
7481 /* Thumb BL (branch long instruction). */
7485 bfd_boolean overflow = FALSE;
7486 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7487 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7488 bfd_signed_vma reloc_signed_max;
7489 bfd_signed_vma reloc_signed_min;
7491 bfd_signed_vma signed_check;
7493 const int thumb2 = using_thumb2 (globals);
7495 /* A branch to an undefined weak symbol is turned into a jump to
7496 the next instruction unless a PLT entry will be created.
7497 The jump to the next instruction is optimized as a NOP.W for
7498 Thumb-2 enabled architectures. */
7499 if (h && h->root.type == bfd_link_hash_undefweak
7500 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7502 if (arch_has_thumb2_nop (globals))
7504 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7505 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7509 bfd_put_16 (input_bfd, 0xe000, hit_data);
7510 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7512 return bfd_reloc_ok;
7515 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7516 with Thumb-1) involving the J1 and J2 bits. */
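/* In the Thumb-2 encoding the 25-bit offset is S:I1:I2:imm10:imm11:'0',
   where I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S); the code below
   reconstructs exactly that and then sign-extends from bit 24.  */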
7517 if (globals->use_rel)
7519 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7520 bfd_vma upper = upper_insn & 0x3ff;
7521 bfd_vma lower = lower_insn & 0x7ff;
7522 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7523 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7524 bfd_vma i1 = j1 ^ s ? 0 : 1;
7525 bfd_vma i2 = j2 ^ s ? 0 : 1;
7527 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7529 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7531 signed_addend = addend;
7534 if (r_type == R_ARM_THM_XPC22)
7536 /* Check for Thumb to Thumb call. */
7537 /* FIXME: Should we translate the instruction into a BL
7538 instruction instead ? */
7539 if (sym_flags == STT_ARM_TFUNC)
7540 (*_bfd_error_handler)
7541 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7543 h ? h->root.root.string : "(local)");
7547 /* If it is not a call to Thumb, assume call to Arm.
7548 If it is a call relative to a section name, then it is not a
7549 function call at all, but rather a long jump. Calls through
7550 the PLT do not require stubs. */
7551 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7552 && (h == NULL || splt == NULL
7553 || h->plt.offset == (bfd_vma) -1))
7555 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7557 /* Convert BL to BLX. */
7558 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7560 else if (( r_type != R_ARM_THM_CALL)
7561 && (r_type != R_ARM_THM_JUMP24))
7563 if (elf32_thumb_to_arm_stub
7564 (info, sym_name, input_bfd, output_bfd, input_section,
7565 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7567 return bfd_reloc_ok;
7569 return bfd_reloc_dangerous;
7572 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7573 && r_type == R_ARM_THM_CALL)
7575 /* Make sure this is a BL. */
7576 lower_insn |= 0x1800;
7580 enum elf32_arm_stub_type stub_type = arm_stub_none;
7581 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7583 /* Check if a stub has to be inserted because the destination is too far or we are changing mode. */
7585 struct elf32_arm_stub_hash_entry *stub_entry;
7586 struct elf32_arm_link_hash_entry *hash;
7588 hash = (struct elf32_arm_link_hash_entry *) h;
7590 stub_type = arm_type_of_stub (info, input_section, rel,
7591 &sym_flags, hash, value, sym_sec,
7592 input_bfd, sym_name);
7594 if (stub_type != arm_stub_none)
7596 /* The target is out of reach or we are changing modes, so
7597 redirect the branch to the local stub for this function. */
7599 stub_entry = elf32_arm_get_stub_entry (input_section,
7603 if (stub_entry != NULL)
7604 value = (stub_entry->stub_offset
7605 + stub_entry->stub_sec->output_offset
7606 + stub_entry->stub_sec->output_section->vma);
7608 /* If this call becomes a call to Arm, force BLX. */
7609 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7612 && !arm_stub_is_thumb (stub_entry->stub_type))
7613 || (sym_flags != STT_ARM_TFUNC))
7614 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7619 /* Handle calls via the PLT. */
7620 if (stub_type == arm_stub_none
7623 && h->plt.offset != (bfd_vma) -1)
7625 value = (splt->output_section->vma
7626 + splt->output_offset
7629 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7631 /* If the Thumb BLX instruction is available, convert
7632 the BL to a BLX instruction to call the ARM-mode PLT entry. */
7634 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7635 sym_flags = STT_FUNC;
7639 /* Target the Thumb stub before the ARM PLT entry. */
7640 value -= PLT_THUMB_STUB_SIZE;
7641 sym_flags = STT_ARM_TFUNC;
7643 *unresolved_reloc_p = FALSE;
7646 relocation = value + signed_addend;
7648 relocation -= (input_section->output_section->vma
7649 + input_section->output_offset
7652 check = relocation >> howto->rightshift;
7654 /* If this is a signed value, the rightshift just dropped
7655 leading 1 bits (assuming twos complement). */
7656 if ((bfd_signed_vma) relocation >= 0)
7657 signed_check = check;
7659 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7661 /* Calculate the permissible maximum and minimum values for
7662 this relocation according to whether we're relocating for Thumb-2 or not. */
7664 bitsize = howto->bitsize;
7667 reloc_signed_max = (1 << (bitsize - 1)) - 1;
7668 reloc_signed_min = ~reloc_signed_max;
7670 /* Assumes two's complement. */
7671 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7674 if ((lower_insn & 0x5000) == 0x4000)
7675 /* For a BLX instruction, make sure that the relocation is rounded up
7676 to a word boundary. This follows the semantics of the instruction
7677 which specifies that bit 1 of the target address will come from bit
7678 1 of the base address. */
7679 relocation = (relocation + 2) & ~ 3;
7681 /* Put RELOCATION back into the insn. Assumes two's complement.
7682 We use the Thumb-2 encoding, which is safe even if dealing with
7683 a Thumb-1 instruction by virtue of our overflow check above. */
7684 reloc_sign = (signed_check < 0) ? 1 : 0;
7685 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7686 | ((relocation >> 12) & 0x3ff)
7687 | (reloc_sign << 10);
7688 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7689 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7690 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7691 | ((relocation >> 1) & 0x7ff);
7693 /* Put the relocated value back in the object file: */
7694 bfd_put_16 (input_bfd, upper_insn, hit_data);
7695 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7697 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7701 case R_ARM_THM_JUMP19:
7702 /* Thumb32 conditional branch instruction. */
7705 bfd_boolean overflow = FALSE;
7706 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7707 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7708 bfd_signed_vma reloc_signed_max = 0xffffe;
7709 bfd_signed_vma reloc_signed_min = -0x100000;
7710 bfd_signed_vma signed_check;
7712 /* Need to refetch the addend, reconstruct the top three bits,
7713 and squish the two 11 bit pieces together. */
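/* For the Thumb-2 conditional branch (B<cond>.W) the offset is assembled
   as S:J2:J1:imm6:imm11:'0' and sign-extended; unlike BL/BLX, J1 and J2
   are used directly rather than being combined with S.  */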
7714 if (globals->use_rel)
7716 bfd_vma S = (upper_insn & 0x0400) >> 10;
7717 bfd_vma upper = (upper_insn & 0x003f);
7718 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7719 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7720 bfd_vma lower = (lower_insn & 0x07ff);
7725 upper -= 0x0100; /* Sign extend. */
7727 addend = (upper << 12) | (lower << 1);
7728 signed_addend = addend;
7731 /* Handle calls via the PLT. */
7732 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7734 value = (splt->output_section->vma
7735 + splt->output_offset
7737 /* Target the Thumb stub before the ARM PLT entry. */
7738 value -= PLT_THUMB_STUB_SIZE;
7739 *unresolved_reloc_p = FALSE;
7742 /* ??? Should handle interworking? GCC might someday try to
7743 use this for tail calls. */
7745 relocation = value + signed_addend;
7746 relocation -= (input_section->output_section->vma
7747 + input_section->output_offset
7749 signed_check = (bfd_signed_vma) relocation;
7751 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7754 /* Put RELOCATION back into the insn. */
7756 bfd_vma S = (relocation & 0x00100000) >> 20;
7757 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7758 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7759 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7760 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7762 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7763 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7766 /* Put the relocated value back in the object file: */
7767 bfd_put_16 (input_bfd, upper_insn, hit_data);
7768 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7770 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7773 case R_ARM_THM_JUMP11:
7774 case R_ARM_THM_JUMP8:
7775 case R_ARM_THM_JUMP6:
7776 /* Thumb B (branch) instruction. */
7778 bfd_signed_vma relocation;
7779 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7780 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7781 bfd_signed_vma signed_check;
7783 /* CBZ cannot jump backward. */
7784 if (r_type == R_ARM_THM_JUMP6)
7785 reloc_signed_min = 0;
7787 if (globals->use_rel)
7789 /* Need to refetch addend. */
7790 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7791 if (addend & ((howto->src_mask + 1) >> 1))
7794 signed_addend &= ~ howto->src_mask;
7795 signed_addend |= addend;
7798 signed_addend = addend;
7799 /* The value in the insn has been right shifted. We need to
7800 undo this, so that we can perform the address calculation
7801 in terms of bytes. */
7802 signed_addend <<= howto->rightshift;
7804 relocation = value + signed_addend;
7806 relocation -= (input_section->output_section->vma
7807 + input_section->output_offset
7810 relocation >>= howto->rightshift;
7811 signed_check = relocation;
7813 if (r_type == R_ARM_THM_JUMP6)
7814 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7816 relocation &= howto->dst_mask;
7817 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7819 bfd_put_16 (input_bfd, relocation, hit_data);
7821 /* Assumes two's complement. */
7822 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7823 return bfd_reloc_overflow;
7825 return bfd_reloc_ok;
7828 case R_ARM_ALU_PCREL7_0:
7829 case R_ARM_ALU_PCREL15_8:
7830 case R_ARM_ALU_PCREL23_15:
7835 insn = bfd_get_32 (input_bfd, hit_data);
7836 if (globals->use_rel)
7838 /* Extract the addend. */
7839 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7840 signed_addend = addend;
7842 relocation = value + signed_addend;
7844 relocation -= (input_section->output_section->vma
7845 + input_section->output_offset
7847 insn = (insn & ~0xfff)
7848 | ((howto->bitpos << 7) & 0xf00)
7849 | ((relocation >> howto->bitpos) & 0xff);
7850 bfd_put_32 (input_bfd, insn, hit_data);
7852 return bfd_reloc_ok;
7854 case R_ARM_GNU_VTINHERIT:
7855 case R_ARM_GNU_VTENTRY:
7856 return bfd_reloc_ok;
7858 case R_ARM_GOTOFF32:
7859 /* Relocation is relative to the start of the
7860 global offset table. */
7862 BFD_ASSERT (sgot != NULL);
7864 return bfd_reloc_notsupported;
7866 /* If we are addressing a Thumb function, we need to adjust the
7867 address by one, so that attempts to call the function pointer will
7868 correctly interpret it as Thumb code. */
7869 if (sym_flags == STT_ARM_TFUNC)
7872 /* Note that sgot->output_offset is not involved in this
7873 calculation. We always want the start of .got. If we
7874 define _GLOBAL_OFFSET_TABLE in a different way, as is
7875 permitted by the ABI, we might have to change this calculation. */
7877 value -= sgot->output_section->vma;
7878 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7879 contents, rel->r_offset, value,
7883 /* Use global offset table as symbol value. */
7884 BFD_ASSERT (sgot != NULL);
7887 return bfd_reloc_notsupported;
7889 *unresolved_reloc_p = FALSE;
7890 value = sgot->output_section->vma;
7891 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7892 contents, rel->r_offset, value,
7896 case R_ARM_GOT_PREL:
7897 /* Relocation is to the entry for this symbol in the
7898 global offset table. */
7900 return bfd_reloc_notsupported;
7907 off = h->got.offset;
7908 BFD_ASSERT (off != (bfd_vma) -1);
7909 dyn = globals->root.dynamic_sections_created;
7911 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7913 && SYMBOL_REFERENCES_LOCAL (info, h))
7914 || (ELF_ST_VISIBILITY (h->other)
7915 && h->root.type == bfd_link_hash_undefweak))
7917 /* This is actually a static link, or it is a -Bsymbolic link
7918 and the symbol is defined locally. We must initialize this
7919 entry in the global offset table. Since the offset must
7920 always be a multiple of 4, we use the least significant bit
7921 to record whether we have initialized it already.
7923 When doing a dynamic link, we create a .rel(a).got relocation
7924 entry to initialize the value. This is done in the
7925 finish_dynamic_symbol routine. */
7930 /* If we are addressing a Thumb function, we need to
7931 adjust the address by one, so that attempts to
7932 call the function pointer will correctly
7933 interpret it as Thumb code. */
7934 if (sym_flags == STT_ARM_TFUNC)
7937 bfd_put_32 (output_bfd, value, sgot->contents + off);
7942 *unresolved_reloc_p = FALSE;
7944 value = sgot->output_offset + off;
7950 BFD_ASSERT (local_got_offsets != NULL &&
7951 local_got_offsets[r_symndx] != (bfd_vma) -1);
7953 off = local_got_offsets[r_symndx];
7955 /* The offset must always be a multiple of 4. We use the
7956 least significant bit to record whether we have already
7957 generated the necessary reloc. */
7962 /* If we are addressing a Thumb function, we need to
7963 adjust the address by one, so that attempts to
7964 call the function pointer will correctly
7965 interpret it as Thumb code. */
7966 if (sym_flags == STT_ARM_TFUNC)
7969 if (globals->use_rel)
7970 bfd_put_32 (output_bfd, value, sgot->contents + off);
7975 Elf_Internal_Rela outrel;
7978 srelgot = (bfd_get_section_by_name
7979 (dynobj, RELOC_SECTION (globals, ".got")));
7980 BFD_ASSERT (srelgot != NULL);
7982 outrel.r_addend = addend + value;
7983 outrel.r_offset = (sgot->output_section->vma
7984 + sgot->output_offset
7986 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7987 loc = srelgot->contents;
7988 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7989 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7992 local_got_offsets[r_symndx] |= 1;
7995 value = sgot->output_offset + off;
7997 if (r_type != R_ARM_GOT32)
7998 value += sgot->output_section->vma;
8000 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8001 contents, rel->r_offset, value,
8004 case R_ARM_TLS_LDO32:
8005 value = value - dtpoff_base (info);
8007 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8008 contents, rel->r_offset, value,
8011 case R_ARM_TLS_LDM32:
8015 if (globals->sgot == NULL)
8018 off = globals->tls_ldm_got.offset;
8024 /* If we don't know the module number, create a relocation
8028 Elf_Internal_Rela outrel;
8031 if (globals->srelgot == NULL)
8034 outrel.r_addend = 0;
8035 outrel.r_offset = (globals->sgot->output_section->vma
8036 + globals->sgot->output_offset + off);
8037 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
8039 if (globals->use_rel)
8040 bfd_put_32 (output_bfd, outrel.r_addend,
8041 globals->sgot->contents + off);
8043 loc = globals->srelgot->contents;
8044 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
8045 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8048 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
8050 globals->tls_ldm_got.offset |= 1;
8053 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8054 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8056 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8057 contents, rel->r_offset, value,
8061 case R_ARM_TLS_GD32:
8062 case R_ARM_TLS_IE32:
8068 if (globals->sgot == NULL)
8075 dyn = globals->root.dynamic_sections_created;
8076 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
8078 || !SYMBOL_REFERENCES_LOCAL (info, h)))
8080 *unresolved_reloc_p = FALSE;
8083 off = h->got.offset;
8084 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
8088 if (local_got_offsets == NULL)
8090 off = local_got_offsets[r_symndx];
8091 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
8094 if (tls_type == GOT_UNKNOWN)
8101 bfd_boolean need_relocs = FALSE;
8102 Elf_Internal_Rela outrel;
8103 bfd_byte *loc = NULL;
8106 /* The GOT entries have not been initialized yet. Do it
8107 now, and emit any relocations. If both an IE GOT and a
8108 GD GOT are necessary, we emit the GD first. */
8110 if ((info->shared || indx != 0)
8112 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8113 || h->root.type != bfd_link_hash_undefweak))
8116 if (globals->srelgot == NULL)
8118 loc = globals->srelgot->contents;
8119 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
8122 if (tls_type & GOT_TLS_GD)
8126 outrel.r_addend = 0;
8127 outrel.r_offset = (globals->sgot->output_section->vma
8128 + globals->sgot->output_offset
8130 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8132 if (globals->use_rel)
8133 bfd_put_32 (output_bfd, outrel.r_addend,
8134 globals->sgot->contents + cur_off);
8136 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8137 globals->srelgot->reloc_count++;
8138 loc += RELOC_SIZE (globals);
8141 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8142 globals->sgot->contents + cur_off + 4);
8145 outrel.r_addend = 0;
8146 outrel.r_info = ELF32_R_INFO (indx,
8147 R_ARM_TLS_DTPOFF32);
8148 outrel.r_offset += 4;
8150 if (globals->use_rel)
8151 bfd_put_32 (output_bfd, outrel.r_addend,
8152 globals->sgot->contents + cur_off + 4);
8155 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8156 globals->srelgot->reloc_count++;
8157 loc += RELOC_SIZE (globals);
8162 /* If we are not emitting relocations for a
8163 general dynamic reference, then we must be in a
8164 static link or an executable link with the
8165 symbol binding locally. Mark it as belonging
8166 to module 1, the executable. */
8167 bfd_put_32 (output_bfd, 1,
8168 globals->sgot->contents + cur_off);
8169 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8170 globals->sgot->contents + cur_off + 4);
8176 if (tls_type & GOT_TLS_IE)
8181 outrel.r_addend = value - dtpoff_base (info);
8183 outrel.r_addend = 0;
8184 outrel.r_offset = (globals->sgot->output_section->vma
8185 + globals->sgot->output_offset
8187 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8189 if (globals->use_rel)
8190 bfd_put_32 (output_bfd, outrel.r_addend,
8191 globals->sgot->contents + cur_off);
8193 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8194 globals->srelgot->reloc_count++;
8195 loc += RELOC_SIZE (globals);
8198 bfd_put_32 (output_bfd, tpoff (info, value),
8199 globals->sgot->contents + cur_off);
8206 local_got_offsets[r_symndx] |= 1;
8209 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8211 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8212 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8214 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8215 contents, rel->r_offset, value,
8219 case R_ARM_TLS_LE32:
8222 (*_bfd_error_handler)
8223 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8224 input_bfd, input_section,
8225 (long) rel->r_offset, howto->name);
8226 return (bfd_reloc_status_type) FALSE;
8229 value = tpoff (info, value);
8231 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8232 contents, rel->r_offset, value,
8236 if (globals->fix_v4bx)
8238 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8240 /* Ensure that we have a BX instruction. */
8241 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8243 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8245 /* Branch to veneer. */
8247 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8248 glue_addr -= input_section->output_section->vma
8249 + input_section->output_offset
8250 + rel->r_offset + 8;
8251 insn = (insn & 0xf0000000) | 0x0a000000
8252 | ((glue_addr >> 2) & 0x00ffffff);
8256 /* Preserve Rm (lowest four bits) and the condition code
8257 (highest four bits). Other bits encode MOV PC,Rm. */
8258 insn = (insn & 0xf000000f) | 0x01a0f000;
8261 bfd_put_32 (input_bfd, insn, hit_data);
8263 return bfd_reloc_ok;
8265 case R_ARM_MOVW_ABS_NC:
8266 case R_ARM_MOVT_ABS:
8267 case R_ARM_MOVW_PREL_NC:
8268 case R_ARM_MOVT_PREL:
8269 /* Until we properly support segment-base-relative addressing,
8270 we assume the segment base to be zero, as for the group relocations.
8271 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8272 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8273 case R_ARM_MOVW_BREL_NC:
8274 case R_ARM_MOVW_BREL:
8275 case R_ARM_MOVT_BREL:
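/* A brief note on the encoding handled below: ARM MOVW/MOVT hold a 16-bit
   immediate split as imm4:imm12, with imm4 in bits 16-19 and imm12 in
   bits 0-11.  MOVT relocations place the high half-word of the computed
   value there, MOVW relocations the low half-word.  */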
8277 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8279 if (globals->use_rel)
8281 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8282 signed_addend = (addend ^ 0x8000) - 0x8000;
8285 value += signed_addend;
8287 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8288 value -= (input_section->output_section->vma
8289 + input_section->output_offset + rel->r_offset);
8291 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8292 return bfd_reloc_overflow;
8294 if (sym_flags == STT_ARM_TFUNC)
8297 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8298 || r_type == R_ARM_MOVT_BREL)
8302 insn |= value & 0xfff;
8303 insn |= (value & 0xf000) << 4;
8304 bfd_put_32 (input_bfd, insn, hit_data);
8306 return bfd_reloc_ok;
8308 case R_ARM_THM_MOVW_ABS_NC:
8309 case R_ARM_THM_MOVT_ABS:
8310 case R_ARM_THM_MOVW_PREL_NC:
8311 case R_ARM_THM_MOVT_PREL:
8312 /* Until we properly support segment-base-relative addressing,
8313 we assume the segment base to be zero, as for the above relocations.
8314 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8315 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8316 as R_ARM_THM_MOVT_ABS. */
8317 case R_ARM_THM_MOVW_BREL_NC:
8318 case R_ARM_THM_MOVW_BREL:
8319 case R_ARM_THM_MOVT_BREL:
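/* Likewise for the Thumb-2 MOVW/MOVT encoding handled below: the 16-bit
   immediate is split as imm4:i:imm3:imm8, which is what the bit shuffling
   at the end of this case reassembles.  */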
8323 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8324 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8326 if (globals->use_rel)
8328 addend = ((insn >> 4) & 0xf000)
8329 | ((insn >> 15) & 0x0800)
8330 | ((insn >> 4) & 0x0700)
8332 signed_addend = (addend ^ 0x8000) - 0x8000;
8335 value += signed_addend;
8337 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8338 value -= (input_section->output_section->vma
8339 + input_section->output_offset + rel->r_offset);
8341 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8342 return bfd_reloc_overflow;
8344 if (sym_flags == STT_ARM_TFUNC)
8347 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8348 || r_type == R_ARM_THM_MOVT_BREL)
8352 insn |= (value & 0xf000) << 4;
8353 insn |= (value & 0x0800) << 15;
8354 insn |= (value & 0x0700) << 4;
8355 insn |= (value & 0x00ff);
8357 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8358 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8360 return bfd_reloc_ok;
8362 case R_ARM_ALU_PC_G0_NC:
8363 case R_ARM_ALU_PC_G1_NC:
8364 case R_ARM_ALU_PC_G0:
8365 case R_ARM_ALU_PC_G1:
8366 case R_ARM_ALU_PC_G2:
8367 case R_ARM_ALU_SB_G0_NC:
8368 case R_ARM_ALU_SB_G1_NC:
8369 case R_ARM_ALU_SB_G0:
8370 case R_ARM_ALU_SB_G1:
8371 case R_ARM_ALU_SB_G2:
8373 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8374 bfd_vma pc = input_section->output_section->vma
8375 + input_section->output_offset + rel->r_offset;
8376 /* sb should be the origin of the *segment* containing the symbol.
8377 It is not clear how to obtain this OS-dependent value, so we
8378 make an arbitrary choice of zero. */
8382 bfd_signed_vma signed_value;
8385 /* Determine which group of bits to select. */
8388 case R_ARM_ALU_PC_G0_NC:
8389 case R_ARM_ALU_PC_G0:
8390 case R_ARM_ALU_SB_G0_NC:
8391 case R_ARM_ALU_SB_G0:
8395 case R_ARM_ALU_PC_G1_NC:
8396 case R_ARM_ALU_PC_G1:
8397 case R_ARM_ALU_SB_G1_NC:
8398 case R_ARM_ALU_SB_G1:
8402 case R_ARM_ALU_PC_G2:
8403 case R_ARM_ALU_SB_G2:
8411 /* If REL, extract the addend from the insn. If RELA, it will
8412 have already been fetched for us. */
8413 if (globals->use_rel)
8416 bfd_vma constant = insn & 0xff;
8417 bfd_vma rotation = (insn & 0xf00) >> 8;
8420 signed_addend = constant;
8423 /* Compensate for the fact that in the instruction, the
8424 rotation is stored in multiples of 2 bits. */
8427 /* Rotate "constant" right by "rotation" bits. */
8428 signed_addend = (constant >> rotation) |
8429 (constant << (8 * sizeof (bfd_vma) - rotation));
8432 /* Determine if the instruction is an ADD or a SUB.
8433 (For REL, this determines the sign of the addend.) */
8434 negative = identify_add_or_sub (insn);
8437 (*_bfd_error_handler)
8438 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8439 input_bfd, input_section,
8440 (long) rel->r_offset, howto->name);
8441 return bfd_reloc_overflow;
8444 signed_addend *= negative;
8447 /* Compute the value (X) to go in the place. */
8448 if (r_type == R_ARM_ALU_PC_G0_NC
8449 || r_type == R_ARM_ALU_PC_G1_NC
8450 || r_type == R_ARM_ALU_PC_G0
8451 || r_type == R_ARM_ALU_PC_G1
8452 || r_type == R_ARM_ALU_PC_G2)
8454 signed_value = value - pc + signed_addend;
8456 /* Section base relative. */
8457 signed_value = value - sb + signed_addend;
8459 /* If the target symbol is a Thumb function, then set the
8460 Thumb bit in the address. */
8461 if (sym_flags == STT_ARM_TFUNC)
8464 /* Calculate the value of the relevant G_n, in encoded
8465 constant-with-rotation format. */
8466 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8469 /* Check for overflow if required. */
8470 if ((r_type == R_ARM_ALU_PC_G0
8471 || r_type == R_ARM_ALU_PC_G1
8472 || r_type == R_ARM_ALU_PC_G2
8473 || r_type == R_ARM_ALU_SB_G0
8474 || r_type == R_ARM_ALU_SB_G1
8475 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8477 (*_bfd_error_handler)
8478 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8479 input_bfd, input_section,
8480 (long) rel->r_offset, abs (signed_value), howto->name);
8481 return bfd_reloc_overflow;
8484 /* Mask out the value and the ADD/SUB part of the opcode; take care
8485 not to destroy the S bit. */
8488 /* Set the opcode according to whether the value to go in the
8489 place is negative. */
8490 if (signed_value < 0)
8495 /* Encode the offset. */
8498 bfd_put_32 (input_bfd, insn, hit_data);
8500 return bfd_reloc_ok;
8502 case R_ARM_LDR_PC_G0:
8503 case R_ARM_LDR_PC_G1:
8504 case R_ARM_LDR_PC_G2:
8505 case R_ARM_LDR_SB_G0:
8506 case R_ARM_LDR_SB_G1:
8507 case R_ARM_LDR_SB_G2:
8509 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8510 bfd_vma pc = input_section->output_section->vma
8511 + input_section->output_offset + rel->r_offset;
8512 bfd_vma sb = 0; /* See note above. */
8514 bfd_signed_vma signed_value;
8517 /* Determine which groups of bits to calculate. */
8520 case R_ARM_LDR_PC_G0:
8521 case R_ARM_LDR_SB_G0:
8525 case R_ARM_LDR_PC_G1:
8526 case R_ARM_LDR_SB_G1:
8530 case R_ARM_LDR_PC_G2:
8531 case R_ARM_LDR_SB_G2:
8539 /* If REL, extract the addend from the insn. If RELA, it will
8540 have already been fetched for us. */
8541 if (globals->use_rel)
8543 int negative = (insn & (1 << 23)) ? 1 : -1;
8544 signed_addend = negative * (insn & 0xfff);
8547 /* Compute the value (X) to go in the place. */
8548 if (r_type == R_ARM_LDR_PC_G0
8549 || r_type == R_ARM_LDR_PC_G1
8550 || r_type == R_ARM_LDR_PC_G2)
8552 signed_value = value - pc + signed_addend;
8554 /* Section base relative. */
8555 signed_value = value - sb + signed_addend;
8557 /* Calculate the value of the relevant G_{n-1} to obtain
8558 the residual at that stage. */
8559 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8561 /* Check for overflow. */
8562 if (residual >= 0x1000)
8564 (*_bfd_error_handler)
8565 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8566 input_bfd, input_section,
8567 (long) rel->r_offset, abs (signed_value), howto->name);
8568 return bfd_reloc_overflow;
8571 /* Mask out the value and U bit. */
8574 /* Set the U bit if the value to go in the place is non-negative. */
8575 if (signed_value >= 0)
8578 /* Encode the offset. */
8581 bfd_put_32 (input_bfd, insn, hit_data);
8583 return bfd_reloc_ok;
8585 case R_ARM_LDRS_PC_G0:
8586 case R_ARM_LDRS_PC_G1:
8587 case R_ARM_LDRS_PC_G2:
8588 case R_ARM_LDRS_SB_G0:
8589 case R_ARM_LDRS_SB_G1:
8590 case R_ARM_LDRS_SB_G2:
8592 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8593 bfd_vma pc = input_section->output_section->vma
8594 + input_section->output_offset + rel->r_offset;
8595 bfd_vma sb = 0; /* See note above. */
8597 bfd_signed_vma signed_value;
8600 /* Determine which groups of bits to calculate. */
8603 case R_ARM_LDRS_PC_G0:
8604 case R_ARM_LDRS_SB_G0:
8608 case R_ARM_LDRS_PC_G1:
8609 case R_ARM_LDRS_SB_G1:
8613 case R_ARM_LDRS_PC_G2:
8614 case R_ARM_LDRS_SB_G2:
8622 /* If REL, extract the addend from the insn. If RELA, it will
8623 have already been fetched for us. */
8624 if (globals->use_rel)
8626 int negative = (insn & (1 << 23)) ? 1 : -1;
8627 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8630 /* Compute the value (X) to go in the place. */
8631 if (r_type == R_ARM_LDRS_PC_G0
8632 || r_type == R_ARM_LDRS_PC_G1
8633 || r_type == R_ARM_LDRS_PC_G2)
8635 signed_value = value - pc + signed_addend;
8637 /* Section base relative. */
8638 signed_value = value - sb + signed_addend;
8640 /* Calculate the value of the relevant G_{n-1} to obtain
8641 the residual at that stage. */
8642 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8644 /* Check for overflow. */
8645 if (residual >= 0x100)
8647 (*_bfd_error_handler)
8648 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8649 input_bfd, input_section,
8650 (long) rel->r_offset, abs (signed_value), howto->name);
8651 return bfd_reloc_overflow;
8654 /* Mask out the value and U bit. */
8657 /* Set the U bit if the value to go in the place is non-negative. */
8658 if (signed_value >= 0)
8661 /* Encode the offset. */
8662 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8664 bfd_put_32 (input_bfd, insn, hit_data);
8666 return bfd_reloc_ok;
8668 case R_ARM_LDC_PC_G0:
8669 case R_ARM_LDC_PC_G1:
8670 case R_ARM_LDC_PC_G2:
8671 case R_ARM_LDC_SB_G0:
8672 case R_ARM_LDC_SB_G1:
8673 case R_ARM_LDC_SB_G2:
8675 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8676 bfd_vma pc = input_section->output_section->vma
8677 + input_section->output_offset + rel->r_offset;
8678 bfd_vma sb = 0; /* See note above. */
8680 bfd_signed_vma signed_value;
8683 /* Determine which groups of bits to calculate. */
8686 case R_ARM_LDC_PC_G0:
8687 case R_ARM_LDC_SB_G0:
8691 case R_ARM_LDC_PC_G1:
8692 case R_ARM_LDC_SB_G1:
8696 case R_ARM_LDC_PC_G2:
8697 case R_ARM_LDC_SB_G2:
8705 /* If REL, extract the addend from the insn. If RELA, it will
8706 have already been fetched for us. */
8707 if (globals->use_rel)
8709 int negative = (insn & (1 << 23)) ? 1 : -1;
8710 signed_addend = negative * ((insn & 0xff) << 2);
8713 /* Compute the value (X) to go in the place. */
8714 if (r_type == R_ARM_LDC_PC_G0
8715 || r_type == R_ARM_LDC_PC_G1
8716 || r_type == R_ARM_LDC_PC_G2)
8718 signed_value = value - pc + signed_addend;
8720 /* Section base relative. */
8721 signed_value = value - sb + signed_addend;
8723 /* Calculate the value of the relevant G_{n-1} to obtain
8724 the residual at that stage. */
8725 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8727 /* Check for overflow. (The absolute value to go in the place must be
8728 divisible by four and, after having been divided by four, must
8729 fit in eight bits.) */
8730 if ((residual & 0x3) != 0 || residual >= 0x400)
8732 (*_bfd_error_handler)
8733 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8734 input_bfd, input_section,
8735 (long) rel->r_offset, abs (signed_value), howto->name);
8736 return bfd_reloc_overflow;
8739 /* Mask out the value and U bit. */
8742 /* Set the U bit if the value to go in the place is non-negative. */
8743 if (signed_value >= 0)
8746 /* Encode the offset. */
8747 insn |= residual >> 2;
8749 bfd_put_32 (input_bfd, insn, hit_data);
8751 return bfd_reloc_ok;
8754 return bfd_reloc_notsupported;
8758 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
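/* For REL-style targets the addend lives in the instruction itself, so
   adjusting it means decoding the immediate, adding the increment, and
   re-encoding it in the instruction's own format; Thumb BL/JUMP24 is
   special-cased below because its addend spans two half-words.  */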
8760 arm_add_to_rel (bfd * abfd,
8762 reloc_howto_type * howto,
8763 bfd_signed_vma increment)
8765 bfd_signed_vma addend;
8767 if (howto->type == R_ARM_THM_CALL
8768 || howto->type == R_ARM_THM_JUMP24)
8770 int upper_insn, lower_insn;
8773 upper_insn = bfd_get_16 (abfd, address);
8774 lower_insn = bfd_get_16 (abfd, address + 2);
8775 upper = upper_insn & 0x7ff;
8776 lower = lower_insn & 0x7ff;
8778 addend = (upper << 12) | (lower << 1);
8779 addend += increment;
8782 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8783 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8785 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8786 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8792 contents = bfd_get_32 (abfd, address);
8794 /* Get the (signed) value from the instruction. */
8795 addend = contents & howto->src_mask;
8796 if (addend & ((howto->src_mask + 1) >> 1))
8798 bfd_signed_vma mask;
8801 mask &= ~ howto->src_mask;
8805 /* Add in the increment (which is a byte value). */
8806 switch (howto->type)
8809 addend += increment;
8816 addend <<= howto->size;
8817 addend += increment;
8819 /* Should we check for overflow here ? */
8821 /* Drop any undesired bits. */
8822 addend >>= howto->rightshift;
8826 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8828 bfd_put_32 (abfd, contents, address);
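/* A brief illustration of the Thumb branch path above: the two
   11-bit fields rebuild a byte offset, so UPPER = 0x001 and
   LOWER = 0x080 give

       addend = (0x001 << 12) | (0x080 << 1);    i.e. 0x1100

   before INCREMENT is applied and the result is split back into the
   low 11 bits of each halfword. The non-branch path sign-extends
   the field selected by howto->src_mask, shifts it up by howto->size
   where needed before adding, drops howto->rightshift bits again and
   re-masks the sum with howto->dst_mask. */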
8832 #define IS_ARM_TLS_RELOC(R_TYPE) \
8833 ((R_TYPE) == R_ARM_TLS_GD32 \
8834 || (R_TYPE) == R_ARM_TLS_LDO32 \
8835 || (R_TYPE) == R_ARM_TLS_LDM32 \
8836 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8837 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8838 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8839 || (R_TYPE) == R_ARM_TLS_LE32 \
8840 || (R_TYPE) == R_ARM_TLS_IE32)
8842 /* Relocate an ARM ELF section. */
8845 elf32_arm_relocate_section (bfd * output_bfd,
8846 struct bfd_link_info * info,
8848 asection * input_section,
8849 bfd_byte * contents,
8850 Elf_Internal_Rela * relocs,
8851 Elf_Internal_Sym * local_syms,
8852 asection ** local_sections)
8854 Elf_Internal_Shdr *symtab_hdr;
8855 struct elf_link_hash_entry **sym_hashes;
8856 Elf_Internal_Rela *rel;
8857 Elf_Internal_Rela *relend;
8859 struct elf32_arm_link_hash_table * globals;
8861 globals = elf32_arm_hash_table (info);
8862 if (globals == NULL)
8865 symtab_hdr = & elf_symtab_hdr (input_bfd);
8866 sym_hashes = elf_sym_hashes (input_bfd);
8869 relend = relocs + input_section->reloc_count;
8870 for (; rel < relend; rel++)
8873 reloc_howto_type * howto;
8874 unsigned long r_symndx;
8875 Elf_Internal_Sym * sym;
8877 struct elf_link_hash_entry * h;
8879 bfd_reloc_status_type r;
8882 bfd_boolean unresolved_reloc = FALSE;
8883 char *error_message = NULL;
8885 r_symndx = ELF32_R_SYM (rel->r_info);
8886 r_type = ELF32_R_TYPE (rel->r_info);
8887 r_type = arm_real_reloc_type (globals, r_type);
8889 if ( r_type == R_ARM_GNU_VTENTRY
8890 || r_type == R_ARM_GNU_VTINHERIT)
8893 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8894 howto = bfd_reloc.howto;
8900 if (r_symndx < symtab_hdr->sh_info)
8902 sym = local_syms + r_symndx;
8903 sym_type = ELF32_ST_TYPE (sym->st_info);
8904 sec = local_sections[r_symndx];
8906 /* An object file might have a reference to a local
8907 undefined symbol. This is a daft object file, but we
8908 should at least do something about it. V4BX & NONE
8909 relocations do not use the symbol and are explicitly
8910 allowed to use the undefined symbol, so allow those.
8911 Likewise for relocations against STN_UNDEF. */
8912 if (r_type != R_ARM_V4BX
8913 && r_type != R_ARM_NONE
8914 && r_symndx != STN_UNDEF
8915 && bfd_is_und_section (sec)
8916 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8918 if (!info->callbacks->undefined_symbol
8919 (info, bfd_elf_string_from_elf_section
8920 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8921 input_bfd, input_section,
8922 rel->r_offset, TRUE))
8926 if (globals->use_rel)
8928 relocation = (sec->output_section->vma
8929 + sec->output_offset
8931 if (!info->relocatable
8932 && (sec->flags & SEC_MERGE)
8933 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8936 bfd_vma addend, value;
8940 case R_ARM_MOVW_ABS_NC:
8941 case R_ARM_MOVT_ABS:
8942 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8943 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8944 addend = (addend ^ 0x8000) - 0x8000;
8947 case R_ARM_THM_MOVW_ABS_NC:
8948 case R_ARM_THM_MOVT_ABS:
8949 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8951 value |= bfd_get_16 (input_bfd,
8952 contents + rel->r_offset + 2);
8953 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8954 | ((value & 0x04000000) >> 15);
8955 addend = (addend ^ 0x8000) - 0x8000;
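/* The masks above recover the usual MOVW/MOVT immediates: the ARM
   form keeps imm16 as imm4:imm12 (bits 19:16 and 11:0), while the
   Thumb-2 form scatters it across imm4, i, imm3 and imm8, which is
   what 0xf7000, 0x04000000 and 0xff pick apart. The final step is a
   plain 16-bit sign extension, e.g.

       addend = 0xffff;
       addend = (addend ^ 0x8000) - 0x8000;    yields -1

   so negative addends survive the SEC_MERGE adjustment below. */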
8959 if (howto->rightshift
8960 || (howto->src_mask & (howto->src_mask + 1)))
8962 (*_bfd_error_handler)
8963 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8964 input_bfd, input_section,
8965 (long) rel->r_offset, howto->name);
8969 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8971 /* Get the (signed) value from the instruction. */
8972 addend = value & howto->src_mask;
8973 if (addend & ((howto->src_mask + 1) >> 1))
8975 bfd_signed_vma mask;
8978 mask &= ~ howto->src_mask;
8986 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8988 addend += msec->output_section->vma + msec->output_offset;
8990 /* Cases here must match those in the preceding
8991 switch statement. */
8994 case R_ARM_MOVW_ABS_NC:
8995 case R_ARM_MOVT_ABS:
8996 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8998 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
9001 case R_ARM_THM_MOVW_ABS_NC:
9002 case R_ARM_THM_MOVT_ABS:
9003 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
9004 | (addend & 0xff) | ((addend & 0x0800) << 15);
9005 bfd_put_16 (input_bfd, value >> 16,
9006 contents + rel->r_offset);
9007 bfd_put_16 (input_bfd, value,
9008 contents + rel->r_offset + 2);
9012 value = (value & ~ howto->dst_mask)
9013 | (addend & howto->dst_mask);
9014 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
9020 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
9026 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
9027 r_symndx, symtab_hdr, sym_hashes,
9029 unresolved_reloc, warned);
9034 if (sec != NULL && elf_discarded_section (sec))
9036 /* For relocs against symbols from removed linkonce sections,
9037 or sections discarded by a linker script, we just want the
9038 section contents zeroed. Avoid any special processing. */
9039 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
9045 if (info->relocatable)
9047 /* This is a relocatable link. We don't have to change
9048 anything, unless the reloc is against a section symbol,
9049 in which case we have to adjust according to where the
9050 section symbol winds up in the output section. */
9051 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
9053 if (globals->use_rel)
9054 arm_add_to_rel (input_bfd, contents + rel->r_offset,
9055 howto, (bfd_signed_vma) sec->output_offset);
9057 rel->r_addend += sec->output_offset;
9063 name = h->root.root.string;
9066 name = (bfd_elf_string_from_elf_section
9067 (input_bfd, symtab_hdr->sh_link, sym->st_name));
9068 if (name == NULL || *name == '\0')
9069 name = bfd_section_name (input_bfd, sec);
9072 if (r_symndx != STN_UNDEF
9073 && r_type != R_ARM_NONE
9075 || h->root.type == bfd_link_hash_defined
9076 || h->root.type == bfd_link_hash_defweak)
9077 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
9079 (*_bfd_error_handler)
9080 ((sym_type == STT_TLS
9081 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
9082 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
9085 (long) rel->r_offset,
9090 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
9091 input_section, contents, rel,
9092 relocation, info, sec, name,
9093 (h ? ELF_ST_TYPE (h->type) :
9094 ELF_ST_TYPE (sym->st_info)), h,
9095 &unresolved_reloc, &error_message);
9097 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
9098 because such sections are not SEC_ALLOC and thus ld.so will
9099 not process them. */
9100 if (unresolved_reloc
9101 && !((input_section->flags & SEC_DEBUGGING) != 0
9104 (*_bfd_error_handler)
9105 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
9108 (long) rel->r_offset,
9110 h->root.root.string);
9114 if (r != bfd_reloc_ok)
9118 case bfd_reloc_overflow:
9119 /* If the overflowing reloc was to an undefined symbol,
9120 we have already printed one error message and there
9121 is no point complaining again. */
9123 h->root.type != bfd_link_hash_undefined)
9124 && (!((*info->callbacks->reloc_overflow)
9125 (info, (h ? &h->root : NULL), name, howto->name,
9126 (bfd_vma) 0, input_bfd, input_section,
9131 case bfd_reloc_undefined:
9132 if (!((*info->callbacks->undefined_symbol)
9133 (info, name, input_bfd, input_section,
9134 rel->r_offset, TRUE)))
9138 case bfd_reloc_outofrange:
9139 error_message = _("out of range");
9142 case bfd_reloc_notsupported:
9143 error_message = _("unsupported relocation");
9146 case bfd_reloc_dangerous:
9147 /* error_message should already be set. */
9151 error_message = _("unknown error");
9155 BFD_ASSERT (error_message != NULL);
9156 if (!((*info->callbacks->reloc_dangerous)
9157 (info, error_message, input_bfd, input_section,
9168 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
9169 adds the edit to the start of the list. (The list must be built in order of
9170 ascending TINDEX: the function's callers are primarily responsible for
9171 maintaining that condition). */
9174 add_unwind_table_edit (arm_unwind_table_edit **head,
9175 arm_unwind_table_edit **tail,
9176 arm_unwind_edit_type type,
9177 asection *linked_section,
9178 unsigned int tindex)
9180 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9181 xmalloc (sizeof (arm_unwind_table_edit));
9183 new_edit->type = type;
9184 new_edit->linked_section = linked_section;
9185 new_edit->index = tindex;
9189 new_edit->next = NULL;
9192 (*tail)->next = new_edit;
9201 new_edit->next = *head;
9210 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9212 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
9214 adjust_exidx_size(asection *exidx_sec, int adjust)
9218 if (!exidx_sec->rawsize)
9219 exidx_sec->rawsize = exidx_sec->size;
9221 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9222 out_sec = exidx_sec->output_section;
9223 /* Adjust size of output section. */
9224 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9227 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9229 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
9231 struct _arm_elf_section_data *exidx_arm_data;
9233 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9234 add_unwind_table_edit (
9235 &exidx_arm_data->u.exidx.unwind_edit_list,
9236 &exidx_arm_data->u.exidx.unwind_edit_tail,
9237 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9239 adjust_exidx_size(exidx_sec, 8);
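/* Each .ARM.exidx entry is a pair of 32-bit words, hence the 8-byte
   growth above; the scan below likewise walks the table in 8-byte
   steps and recognises an EXIDX_CANTUNWIND entry by the value 1 in
   its second word. */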
9242 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9243 made to those tables, such that:
9245 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9246 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9247 codes which have been inlined into the index).
9249 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
9251 The edits are applied when the tables are written
9252 (in elf32_arm_write_section).
9256 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9257 unsigned int num_text_sections,
9258 struct bfd_link_info *info,
9259 bfd_boolean merge_exidx_entries)
9262 unsigned int last_second_word = 0, i;
9263 asection *last_exidx_sec = NULL;
9264 asection *last_text_sec = NULL;
9265 int last_unwind_type = -1;
9267 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9269 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9273 for (sec = inp->sections; sec != NULL; sec = sec->next)
9275 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9276 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9278 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9281 if (elf_sec->linked_to)
9283 Elf_Internal_Shdr *linked_hdr
9284 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9285 struct _arm_elf_section_data *linked_sec_arm_data
9286 = get_arm_elf_section_data (linked_hdr->bfd_section);
9288 if (linked_sec_arm_data == NULL)
9291 /* Link this .ARM.exidx section back from the text section it
9293 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9298 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
9299 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9300 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
9302 for (i = 0; i < num_text_sections; i++)
9304 asection *sec = text_section_order[i];
9305 asection *exidx_sec;
9306 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9307 struct _arm_elf_section_data *exidx_arm_data;
9308 bfd_byte *contents = NULL;
9309 int deleted_exidx_bytes = 0;
9311 arm_unwind_table_edit *unwind_edit_head = NULL;
9312 arm_unwind_table_edit *unwind_edit_tail = NULL;
9313 Elf_Internal_Shdr *hdr;
9316 if (arm_data == NULL)
9319 exidx_sec = arm_data->u.text.arm_exidx_sec;
9320 if (exidx_sec == NULL)
9322 /* Section has no unwind data. */
9323 if (last_unwind_type == 0 || !last_exidx_sec)
9326 /* Ignore zero sized sections. */
9330 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9331 last_unwind_type = 0;
9335 /* Skip /DISCARD/ sections. */
9336 if (bfd_is_abs_section (exidx_sec->output_section))
9339 hdr = &elf_section_data (exidx_sec)->this_hdr;
9340 if (hdr->sh_type != SHT_ARM_EXIDX)
9343 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9344 if (exidx_arm_data == NULL)
9347 ibfd = exidx_sec->owner;
9349 if (hdr->contents != NULL)
9350 contents = hdr->contents;
9351 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9355 for (j = 0; j < hdr->sh_size; j += 8)
9357 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9361 /* An EXIDX_CANTUNWIND entry. */
9362 if (second_word == 1)
9364 if (last_unwind_type == 0)
9368 /* Inlined unwinding data. Merge if equal to previous. */
9369 else if ((second_word & 0x80000000) != 0)
9371 if (merge_exidx_entries
9372 && last_second_word == second_word && last_unwind_type == 1)
9375 last_second_word = second_word;
9377 /* Normal table entry. In theory we could merge these too,
9378 but duplicate entries are likely to be much less common. */
9384 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9385 DELETE_EXIDX_ENTRY, NULL, j / 8);
9387 deleted_exidx_bytes += 8;
9390 last_unwind_type = unwind_type;
9393 /* Free contents if we allocated it ourselves. */
9394 if (contents != hdr->contents)
9397 /* Record edits to be applied later (in elf32_arm_write_section). */
9398 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9399 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9401 if (deleted_exidx_bytes > 0)
9402 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
9404 last_exidx_sec = exidx_sec;
9405 last_text_sec = sec;
9408 /* Add terminating CANTUNWIND entry. */
9409 if (last_exidx_sec && last_unwind_type != 0)
9410 insert_cantunwind_after(last_text_sec, last_exidx_sec);
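/* A sketch of the effect, assuming merging is enabled and the
   sections are non-empty: for text sections A, B and C in VMA order,
   where A and B carry identical inlined unwind entries and C has no
   unwind data at all, the loop records a DELETE_EXIDX_ENTRY edit for
   B's duplicate entry and an INSERT_EXIDX_CANTUNWIND_AT_END edit
   after B's table to cover C; since coverage then already ends in
   CANTUNWIND, no further terminating entry is added here. */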
9416 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9417 bfd *ibfd, const char *name)
9419 asection *sec, *osec;
9421 sec = bfd_get_section_by_name (ibfd, name);
9422 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9425 osec = sec->output_section;
9426 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9429 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9430 sec->output_offset, sec->size))
9437 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9439 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9440 asection *sec, *osec;
9442 if (globals == NULL)
9445 /* Invoke the regular ELF backend linker to do all the work. */
9446 if (!bfd_elf_final_link (abfd, info))
9449 /* Process stub sections (e.g. BE8 encoding, ...). */
9450 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
9452 for (i = 0; i < htab->top_id; i++)
9454 sec = htab->stub_group[i].stub_sec;
9455 /* Only process it once, in its link_sec slot. */
9456 if (sec && i == htab->stub_group[i].link_sec->id)
9458 osec = sec->output_section;
9459 elf32_arm_write_section (abfd, info, sec, sec->contents);
9460 if (! bfd_set_section_contents (abfd, osec, sec->contents,
9461 sec->output_offset, sec->size))
9466 /* Write out any glue sections now that we have created all the
9468 if (globals->bfd_of_glue_owner != NULL)
9470 if (! elf32_arm_output_glue_section (info, abfd,
9471 globals->bfd_of_glue_owner,
9472 ARM2THUMB_GLUE_SECTION_NAME))
9475 if (! elf32_arm_output_glue_section (info, abfd,
9476 globals->bfd_of_glue_owner,
9477 THUMB2ARM_GLUE_SECTION_NAME))
9480 if (! elf32_arm_output_glue_section (info, abfd,
9481 globals->bfd_of_glue_owner,
9482 VFP11_ERRATUM_VENEER_SECTION_NAME))
9485 if (! elf32_arm_output_glue_section (info, abfd,
9486 globals->bfd_of_glue_owner,
9487 ARM_BX_GLUE_SECTION_NAME))
9494 /* Set the right machine number. */
9497 elf32_arm_object_p (bfd *abfd)
9501 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9503 if (mach != bfd_mach_arm_unknown)
9504 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9506 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9507 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9510 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9515 /* Function to keep ARM specific flags in the ELF header. */
9518 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9520 if (elf_flags_init (abfd)
9521 && elf_elfheader (abfd)->e_flags != flags)
9523 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9525 if (flags & EF_ARM_INTERWORK)
9526 (*_bfd_error_handler)
9527 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9531 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9537 elf_elfheader (abfd)->e_flags = flags;
9538 elf_flags_init (abfd) = TRUE;
9544 /* Copy backend specific data from one object module to another. */
9547 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9552 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9555 in_flags = elf_elfheader (ibfd)->e_flags;
9556 out_flags = elf_elfheader (obfd)->e_flags;
9558 if (elf_flags_init (obfd)
9559 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9560 && in_flags != out_flags)
9562 /* Cannot mix APCS26 and APCS32 code. */
9563 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9566 /* Cannot mix float APCS and non-float APCS code. */
9567 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9570 /* If the src and dest have different interworking flags
9571 then turn off the interworking bit. */
9572 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9574 if (out_flags & EF_ARM_INTERWORK)
9576 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9579 in_flags &= ~EF_ARM_INTERWORK;
9582 /* Likewise for PIC, though don't warn for this case. */
9583 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9584 in_flags &= ~EF_ARM_PIC;
9587 elf_elfheader (obfd)->e_flags = in_flags;
9588 elf_flags_init (obfd) = TRUE;
9590 /* Also copy the EI_OSABI field. */
9591 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9592 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9594 /* Copy object attributes. */
9595 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9600 /* Values for Tag_ABI_PCS_R9_use. */
9609 /* Values for Tag_ABI_PCS_RW_data. */
9612 AEABI_PCS_RW_data_absolute,
9613 AEABI_PCS_RW_data_PCrel,
9614 AEABI_PCS_RW_data_SBrel,
9615 AEABI_PCS_RW_data_unused
9618 /* Values for Tag_ABI_enum_size. */
9624 AEABI_enum_forced_wide
9627 /* Determine whether an object attribute tag takes an integer, a
9631 elf32_arm_obj_attrs_arg_type (int tag)
9633 if (tag == Tag_compatibility)
9634 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9635 else if (tag == Tag_nodefaults)
9636 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9637 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9638 return ATTR_TYPE_FLAG_STR_VAL;
9640 return ATTR_TYPE_FLAG_INT_VAL;
9642 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
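/* For instance (using the standard EABI tag numbers):

       elf32_arm_obj_attrs_arg_type (Tag_CPU_name)  == ATTR_TYPE_FLAG_STR_VAL
       elf32_arm_obj_attrs_arg_type (Tag_CPU_arch)  == ATTR_TYPE_FLAG_INT_VAL

   The parity test at the end mirrors the EABI convention that, among
   tags not known individually here, even numbers take integer values
   and odd numbers take strings. */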
9645 /* The ABI defines that Tag_conformance should be emitted first, and that
9646 Tag_nodefaults should be second (if either is defined). This sets those
9647 two positions, and bumps up the position of all the remaining tags to
9650 elf32_arm_obj_attrs_order (int num)
9652 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
9653 return Tag_conformance;
9654 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
9655 return Tag_nodefaults;
9656 if ((num - 2) < Tag_nodefaults)
9658 if ((num - 1) < Tag_conformance)
9663 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9664 Returns -1 if no architecture could be read. */
9667 get_secondary_compatible_arch (bfd *abfd)
9669 obj_attribute *attr =
9670 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9672 /* Note: the tag and its argument below are uleb128 values, though
9673 currently-defined values fit in one byte for each. */
9675 && attr->s[0] == Tag_CPU_arch
9676 && (attr->s[1] & 128) != 128
9680 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9684 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9685 The tag is removed if ARCH is -1. */
9688 set_secondary_compatible_arch (bfd *abfd, int arch)
9690 obj_attribute *attr =
9691 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9699 /* Note: the tag and its argument below are uleb128 values, though
9700 currently-defined values fit in one byte for each. */
9702 attr->s = (char *) bfd_alloc (abfd, 3);
9703 attr->s[0] = Tag_CPU_arch;
9708 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9712 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9713 int newtag, int secondary_compat)
9715 #define T(X) TAG_CPU_ARCH_##X
9716 int tagl, tagh, result;
9719 T(V6T2), /* PRE_V4. */
9723 T(V6T2), /* V5TE. */
9724 T(V6T2), /* V5TEJ. */
9731 T(V6K), /* PRE_V4. */
9736 T(V6K), /* V5TEJ. */
9738 T(V6KZ), /* V6KZ. */
9744 T(V7), /* PRE_V4. */
9763 T(V6K), /* V5TEJ. */
9765 T(V6KZ), /* V6KZ. */
9778 T(V6K), /* V5TEJ. */
9780 T(V6KZ), /* V6KZ. */
9784 T(V6S_M), /* V6_M. */
9785 T(V6S_M) /* V6S_M. */
9791 T(V7E_M), /* V4T. */
9792 T(V7E_M), /* V5T. */
9793 T(V7E_M), /* V5TE. */
9794 T(V7E_M), /* V5TEJ. */
9796 T(V7E_M), /* V6KZ. */
9797 T(V7E_M), /* V6T2. */
9798 T(V7E_M), /* V6K. */
9800 T(V7E_M), /* V6_M. */
9801 T(V7E_M), /* V6S_M. */
9802 T(V7E_M) /* V7E_M. */
9804 const int v4t_plus_v6_m[] =
9810 T(V5TE), /* V5TE. */
9811 T(V5TEJ), /* V5TEJ. */
9813 T(V6KZ), /* V6KZ. */
9814 T(V6T2), /* V6T2. */
9817 T(V6_M), /* V6_M. */
9818 T(V6S_M), /* V6S_M. */
9819 T(V7E_M), /* V7E_M. */
9820 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9830 /* Pseudo-architecture. */
9834 /* Check we've not got a higher architecture than we know about. */
9836 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
9838 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9842 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9844 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9845 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9846 oldtag = T(V4T_PLUS_V6_M);
9848 /* And override the new tag if we have a Tag_also_compatible_with on the
9851 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9852 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9853 newtag = T(V4T_PLUS_V6_M);
9855 tagl = (oldtag < newtag) ? oldtag : newtag;
9856 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9858 /* Architectures before V6KZ add features monotonically. */
9859 if (tagh <= TAG_CPU_ARCH_V6KZ)
9862 result = comb[tagh - T(V6T2)][tagl];
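/* In other words, comb[] only needs rows for V6T2 and the
   architectures that follow it in the Tag_CPU_arch numbering, each
   row indexed by the older of the two tags; every older pairing is
   already covered by the simple maximum taken above. */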
9864 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9865 as the canonical version. */
9866 if (result == T(V4T_PLUS_V6_M))
9869 *secondary_compat_out = T(V6_M);
9872 *secondary_compat_out = -1;
9876 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9877 ibfd, oldtag, newtag);
9885 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9886 are conflicting attributes. */
9889 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9891 obj_attribute *in_attr;
9892 obj_attribute *out_attr;
9893 obj_attribute_list *in_list;
9894 obj_attribute_list *out_list;
9895 obj_attribute_list **out_listp;
9896 /* Some tags have 0 = don't care, 1 = strong requirement,
9897 2 = weak requirement. */
9898 static const int order_021[3] = {0, 2, 1};
9900 bfd_boolean result = TRUE;
9902 /* Skip the linker stubs file. This preserves previous behavior
9903 of accepting unknown attributes in the first input file - but
9905 if (ibfd->flags & BFD_LINKER_CREATED)
9908 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9910 /* This is the first object. Copy the attributes. */
9911 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9913 out_attr = elf_known_obj_attributes_proc (obfd);
9915 /* Use the Tag_null value to indicate the attributes have been
9919 /* We do not output objects with Tag_MPextension_use_legacy - we move
9920 the attribute's value to Tag_MPextension_use. */
9921 if (out_attr[Tag_MPextension_use_legacy].i != 0)
9923 if (out_attr[Tag_MPextension_use].i != 0
9924 && out_attr[Tag_MPextension_use_legacy].i
9925 != out_attr[Tag_MPextension_use].i)
9928 (_("Error: %B has both the current and legacy "
9929 "Tag_MPextension_use attributes"), ibfd);
9933 out_attr[Tag_MPextension_use] =
9934 out_attr[Tag_MPextension_use_legacy];
9935 out_attr[Tag_MPextension_use_legacy].type = 0;
9936 out_attr[Tag_MPextension_use_legacy].i = 0;
9942 in_attr = elf_known_obj_attributes_proc (ibfd);
9943 out_attr = elf_known_obj_attributes_proc (obfd);
9944 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9945 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9947 /* Ignore mismatches if the object doesn't use floating point. */
9948 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9949 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9950 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9953 (_("error: %B uses VFP register arguments, %B does not"),
9954 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
9955 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
9960 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9962 /* Merge this attribute with existing attributes. */
9965 case Tag_CPU_raw_name:
9967 /* These are merged after Tag_CPU_arch. */
9970 case Tag_ABI_optimization_goals:
9971 case Tag_ABI_FP_optimization_goals:
9972 /* Use the first value seen. */
9977 int secondary_compat = -1, secondary_compat_out = -1;
9978 unsigned int saved_out_attr = out_attr[i].i;
9979 static const char *name_table[] = {
9980 /* These aren't real CPU names, but we can't guess
9981 that from the architecture version alone. */
9997 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9998 secondary_compat = get_secondary_compatible_arch (ibfd);
9999 secondary_compat_out = get_secondary_compatible_arch (obfd);
10000 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
10001 &secondary_compat_out,
10004 set_secondary_compatible_arch (obfd, secondary_compat_out);
10006 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
10007 if (out_attr[i].i == saved_out_attr)
10008 ; /* Leave the names alone. */
10009 else if (out_attr[i].i == in_attr[i].i)
10011 /* The output architecture has been changed to match the
10012 input architecture. Use the input names. */
10013 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
10014 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
10016 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
10017 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
10022 out_attr[Tag_CPU_name].s = NULL;
10023 out_attr[Tag_CPU_raw_name].s = NULL;
10026 /* If we still don't have a value for Tag_CPU_name,
10027 make one up now. Tag_CPU_raw_name remains blank. */
10028 if (out_attr[Tag_CPU_name].s == NULL
10029 && out_attr[i].i < ARRAY_SIZE (name_table))
10030 out_attr[Tag_CPU_name].s =
10031 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
10035 case Tag_ARM_ISA_use:
10036 case Tag_THUMB_ISA_use:
10037 case Tag_WMMX_arch:
10038 case Tag_Advanced_SIMD_arch:
10039 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
10040 case Tag_ABI_FP_rounding:
10041 case Tag_ABI_FP_exceptions:
10042 case Tag_ABI_FP_user_exceptions:
10043 case Tag_ABI_FP_number_model:
10044 case Tag_FP_HP_extension:
10045 case Tag_CPU_unaligned_access:
10047 case Tag_MPextension_use:
10048 /* Use the largest value specified. */
10049 if (in_attr[i].i > out_attr[i].i)
10050 out_attr[i].i = in_attr[i].i;
10053 case Tag_ABI_align_preserved:
10054 case Tag_ABI_PCS_RO_data:
10055 /* Use the smallest value specified. */
10056 if (in_attr[i].i < out_attr[i].i)
10057 out_attr[i].i = in_attr[i].i;
10060 case Tag_ABI_align_needed:
10061 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
10062 && (in_attr[Tag_ABI_align_preserved].i == 0
10063 || out_attr[Tag_ABI_align_preserved].i == 0))
10065 /* This error message should be enabled once all non-conformant
10066 binaries in the toolchain have had the attributes set
10069 (_("error: %B: 8-byte data alignment conflicts with %B"),
10073 /* Fall through. */
10074 case Tag_ABI_FP_denormal:
10075 case Tag_ABI_PCS_GOT_use:
10076 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
10077 value if greater than 2 (for future-proofing). */
10078 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
10079 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
10080 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
10081 out_attr[i].i = in_attr[i].i;
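/* For example, merging an input of 1 (strong requirement) into an
   output of 2 (weak requirement): order_021[1] is 2 and order_021[2]
   is 1, so the input ranks higher and the output becomes 1; values
   above 2 simply take the numerically larger of the pair. */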
10084 case Tag_Virtualization_use:
10085 /* The virtualization tag effectively stores two bits of
10086 information: the intended use of TrustZone (in bit 0), and the
10087 intended use of Virtualization (in bit 1). */
10088 if (out_attr[i].i == 0)
10089 out_attr[i].i = in_attr[i].i;
10090 else if (in_attr[i].i != 0
10091 && in_attr[i].i != out_attr[i].i)
10093 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
10098 (_("error: %B: unable to merge virtualization attributes "
10106 case Tag_CPU_arch_profile:
10107 if (out_attr[i].i != in_attr[i].i)
10109 /* 0 will merge with anything.
10110 'A' and 'S' merge to 'A'.
10111 'R' and 'S' merge to 'R'.
10112 'M' and 'A|R|S' is an error. */
10113 if (out_attr[i].i == 0
10114 || (out_attr[i].i == 'S'
10115 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
10116 out_attr[i].i = in_attr[i].i;
10117 else if (in_attr[i].i == 0
10118 || (in_attr[i].i == 'S'
10119 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
10120 ; /* Do nothing. */
10124 (_("error: %B: Conflicting architecture profiles %c/%c"),
10126 in_attr[i].i ? in_attr[i].i : '0',
10127 out_attr[i].i ? out_attr[i].i : '0');
10134 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
10135 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
10136 when it's 0. It might mean absence of FP hardware if
10137 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
10139 static const struct
10143 } vfp_versions[7] =
10157 /* If the output has no requirement about FP hardware,
10158 follow the requirement of the input. */
10159 if (out_attr[i].i == 0)
10161 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
10162 out_attr[i].i = in_attr[i].i;
10163 out_attr[Tag_ABI_HardFP_use].i
10164 = in_attr[Tag_ABI_HardFP_use].i;
10167 /* If the input has no requirement about FP hardware, do
10169 else if (in_attr[i].i == 0)
10171 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
10175 /* Both the input and the output have nonzero Tag_FP_arch.
10176 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
10178 /* If both the input and the output have zero Tag_ABI_HardFP_use,
10180 if (in_attr[Tag_ABI_HardFP_use].i == 0
10181 && out_attr[Tag_ABI_HardFP_use].i == 0)
10183 /* If the input and the output have different Tag_ABI_HardFP_use,
10184 the combination of them is 3 (SP & DP). */
10185 else if (in_attr[Tag_ABI_HardFP_use].i
10186 != out_attr[Tag_ABI_HardFP_use].i)
10187 out_attr[Tag_ABI_HardFP_use].i = 3;
10189 /* Now we can handle Tag_FP_arch. */
10191 /* Values greater than 6 aren't defined, so just pick the
10193 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
10195 out_attr[i] = in_attr[i];
10198 /* The output uses the superset of input features
10199 (ISA version) and registers. */
10200 ver = vfp_versions[in_attr[i].i].ver;
10201 if (ver < vfp_versions[out_attr[i].i].ver)
10202 ver = vfp_versions[out_attr[i].i].ver;
10203 regs = vfp_versions[in_attr[i].i].regs;
10204 if (regs < vfp_versions[out_attr[i].i].regs)
10205 regs = vfp_versions[out_attr[i].i].regs;
10206 /* This assumes all possible supersets are also a valid
10208 for (newval = 6; newval > 0; newval--)
10210 if (regs == vfp_versions[newval].regs
10211 && ver == vfp_versions[newval].ver)
10214 out_attr[i].i = newval;
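/* A worked example, assuming the usual vfp_versions pairings
   (attribute 3 = {ver 3, 32 regs}, 4 = {ver 3, 16 regs},
   6 = {ver 4, 16 regs}): merging in_attr 4 with out_attr 3 keeps
   ver 3 and 32 registers, and the downward scan settles on newval 3;
   merging 4 with 6 keeps ver 4 and 16 registers, giving newval 6. */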
10217 case Tag_PCS_config:
10218 if (out_attr[i].i == 0)
10219 out_attr[i].i = in_attr[i].i;
10220 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
10222 /* It's sometimes ok to mix different configs, so this is only
10225 (_("Warning: %B: Conflicting platform configuration"), ibfd);
10228 case Tag_ABI_PCS_R9_use:
10229 if (in_attr[i].i != out_attr[i].i
10230 && out_attr[i].i != AEABI_R9_unused
10231 && in_attr[i].i != AEABI_R9_unused)
10234 (_("error: %B: Conflicting use of R9"), ibfd);
10237 if (out_attr[i].i == AEABI_R9_unused)
10238 out_attr[i].i = in_attr[i].i;
10240 case Tag_ABI_PCS_RW_data:
10241 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
10242 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
10243 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
10246 (_("error: %B: SB relative addressing conflicts with use of R9"),
10250 /* Use the smallest value specified. */
10251 if (in_attr[i].i < out_attr[i].i)
10252 out_attr[i].i = in_attr[i].i;
10254 case Tag_ABI_PCS_wchar_t:
10255 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10256 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10259 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10260 ibfd, in_attr[i].i, out_attr[i].i);
10262 else if (in_attr[i].i && !out_attr[i].i)
10263 out_attr[i].i = in_attr[i].i;
10265 case Tag_ABI_enum_size:
10266 if (in_attr[i].i != AEABI_enum_unused)
10268 if (out_attr[i].i == AEABI_enum_unused
10269 || out_attr[i].i == AEABI_enum_forced_wide)
10271 /* The existing object is compatible with anything.
10272 Use whatever requirements the new object has. */
10273 out_attr[i].i = in_attr[i].i;
10275 else if (in_attr[i].i != AEABI_enum_forced_wide
10276 && out_attr[i].i != in_attr[i].i
10277 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10279 static const char *aeabi_enum_names[] =
10280 { "", "variable-size", "32-bit", "" };
10281 const char *in_name =
10282 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10283 ? aeabi_enum_names[in_attr[i].i]
10285 const char *out_name =
10286 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10287 ? aeabi_enum_names[out_attr[i].i]
10290 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10291 ibfd, in_name, out_name);
10295 case Tag_ABI_VFP_args:
10298 case Tag_ABI_WMMX_args:
10299 if (in_attr[i].i != out_attr[i].i)
10302 (_("error: %B uses iWMMXt register arguments, %B does not"),
10307 case Tag_compatibility:
10308 /* Merged in target-independent code. */
10310 case Tag_ABI_HardFP_use:
10311 /* This is handled along with Tag_FP_arch. */
10313 case Tag_ABI_FP_16bit_format:
10314 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10316 if (in_attr[i].i != out_attr[i].i)
10319 (_("error: fp16 format mismatch between %B and %B"),
10324 if (in_attr[i].i != 0)
10325 out_attr[i].i = in_attr[i].i;
10329 /* This tag is set to zero if we can use UDIV and SDIV in Thumb
10330 mode on a v7-M or v7-R CPU; to one if we can not use UDIV or
10331 SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
10332 CPU. We will merge as follows: If the input attribute's value
10333 is one then the output attribute's value remains unchanged. If
10334 the input attribute's value is zero or two then if the output
10335 attribute's value is one the output value is set to the input
10336 value, otherwise the output value must be the same as the
10338 if (in_attr[i].i != 1 && out_attr[i].i != 1)
10340 if (in_attr[i].i != out_attr[i].i)
10343 (_("DIV usage mismatch between %B and %B"),
10349 if (in_attr[i].i != 1)
10350 out_attr[i].i = in_attr[i].i;
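/* For example: an input of 0 (Thumb UDIV/SDIV allowed) merged into
   an output of 1 (no hardware divide) replaces the output with 0, an
   input of 1 never changes the output, and merging 0 with 2 (divide
   available on v7-A) triggers the mismatch error above. */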
10354 case Tag_MPextension_use_legacy:
10355 /* We don't output objects with Tag_MPextension_use_legacy - we
10356 move the value to Tag_MPextension_use. */
10357 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
10359 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
10362 (_("%B has has both the current and legacy "
10363 "Tag_MPextension_use attributes"),
10369 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
10370 out_attr[Tag_MPextension_use] = in_attr[i];
10374 case Tag_nodefaults:
10375 /* This tag is set if it exists, but the value is unused (and is
10376 typically zero). We don't actually need to do anything here -
10377 the merge happens automatically when the type flags are merged
10380 case Tag_also_compatible_with:
10381 /* Already done in Tag_CPU_arch. */
10383 case Tag_conformance:
10384 /* Keep the attribute if it matches. Throw it away otherwise.
10385 No attribute means no claim to conform. */
10386 if (!in_attr[i].s || !out_attr[i].s
10387 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10388 out_attr[i].s = NULL;
10393 bfd *err_bfd = NULL;
10395 /* The "known_obj_attributes" table does contain some undefined
10396 attributes. Ensure that they are unused. */
10397 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
10399 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
10402 if (err_bfd != NULL)
10404 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10405 if ((i & 127) < 64)
10408 (_("%B: Unknown mandatory EABI object attribute %d"),
10410 bfd_set_error (bfd_error_bad_value);
10416 (_("Warning: %B: Unknown EABI object attribute %d"),
10421 /* Only pass on attributes that match in both inputs. */
10422 if (in_attr[i].i != out_attr[i].i
10423 || in_attr[i].s != out_attr[i].s
10424 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10425 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10428 out_attr[i].s = NULL;
10433 /* If out_attr was copied from in_attr then it won't have a type yet. */
10434 if (in_attr[i].type && !out_attr[i].type)
10435 out_attr[i].type = in_attr[i].type;
10438 /* Merge Tag_compatibility attributes and any common GNU ones. */
10439 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
10442 /* Check for any attributes not known on ARM. */
10443 in_list = elf_other_obj_attributes_proc (ibfd);
10444 out_listp = &elf_other_obj_attributes_proc (obfd);
10445 out_list = *out_listp;
10447 for (; in_list || out_list; )
10449 bfd *err_bfd = NULL;
10452 /* The tags for each list are in numerical order. */
10453 /* If the tags are equal, then merge. */
10454 if (out_list && (!in_list || in_list->tag > out_list->tag))
10456 /* This attribute only exists in obfd. We can't merge, and we don't
10457 know what the tag means, so delete it. */
10459 err_tag = out_list->tag;
10460 *out_listp = out_list->next;
10461 out_list = *out_listp;
10463 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10465 /* This attribute only exists in ibfd. We can't merge, and we don't
10466 know what the tag means, so ignore it. */
10468 err_tag = in_list->tag;
10469 in_list = in_list->next;
10471 else /* The tags are equal. */
10473 /* At present, all attributes in the list are unknown, and
10474 therefore can't be merged meaningfully. */
10476 err_tag = out_list->tag;
10478 /* Only pass on attributes that match in both inputs. */
10479 if (in_list->attr.i != out_list->attr.i
10480 || in_list->attr.s != out_list->attr.s
10481 || (in_list->attr.s && out_list->attr.s
10482 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10484 /* No match. Delete the attribute. */
10485 *out_listp = out_list->next;
10486 out_list = *out_listp;
10490 /* Matched. Keep the attribute and move to the next. */
10491 out_list = out_list->next;
10492 in_list = in_list->next;
10498 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10499 if ((err_tag & 127) < 64)
10502 (_("%B: Unknown mandatory EABI object attribute %d"),
10504 bfd_set_error (bfd_error_bad_value);
10510 (_("Warning: %B: Unknown EABI object attribute %d"),
10519 /* Return TRUE if the two EABI versions are compatible. */
10522 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10524 /* v4 and v5 are the same spec before and after it was released,
10525 so allow mixing them. */
10526 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10527 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10530 return (iver == over);
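/* For instance:

       elf32_arm_versions_compatible (EF_ARM_EABI_VER4, EF_ARM_EABI_VER5) is TRUE
       elf32_arm_versions_compatible (EF_ARM_EABI_UNKNOWN, EF_ARM_EABI_VER5) is FALSE

   so v4 and v5 objects may be mixed freely, while anything else must
   match exactly. */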
10533 /* Merge backend specific data from an object file to the output
10534 object file when linking. */
10537 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10539 /* Display the flags field. */
10542 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10544 FILE * file = (FILE *) ptr;
10545 unsigned long flags;
10547 BFD_ASSERT (abfd != NULL && ptr != NULL);
10549 /* Print normal ELF private data. */
10550 _bfd_elf_print_private_bfd_data (abfd, ptr);
10552 flags = elf_elfheader (abfd)->e_flags;
10553 /* Ignore init flag - it may not be set, despite the flags field
10554 containing valid data. */
10556 /* xgettext:c-format */
10557 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10559 switch (EF_ARM_EABI_VERSION (flags))
10561 case EF_ARM_EABI_UNKNOWN:
10562 /* The following flag bits are GNU extensions and not part of the
10563 official ARM ELF extended ABI. Hence they are only decoded if
10564 the EABI version is not set. */
10565 if (flags & EF_ARM_INTERWORK)
10566 fprintf (file, _(" [interworking enabled]"));
10568 if (flags & EF_ARM_APCS_26)
10569 fprintf (file, " [APCS-26]");
10571 fprintf (file, " [APCS-32]");
10573 if (flags & EF_ARM_VFP_FLOAT)
10574 fprintf (file, _(" [VFP float format]"));
10575 else if (flags & EF_ARM_MAVERICK_FLOAT)
10576 fprintf (file, _(" [Maverick float format]"));
10578 fprintf (file, _(" [FPA float format]"));
10580 if (flags & EF_ARM_APCS_FLOAT)
10581 fprintf (file, _(" [floats passed in float registers]"));
10583 if (flags & EF_ARM_PIC)
10584 fprintf (file, _(" [position independent]"));
10586 if (flags & EF_ARM_NEW_ABI)
10587 fprintf (file, _(" [new ABI]"));
10589 if (flags & EF_ARM_OLD_ABI)
10590 fprintf (file, _(" [old ABI]"));
10592 if (flags & EF_ARM_SOFT_FLOAT)
10593 fprintf (file, _(" [software FP]"));
10595 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10596 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10597 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10598 | EF_ARM_MAVERICK_FLOAT);
10601 case EF_ARM_EABI_VER1:
10602 fprintf (file, _(" [Version1 EABI]"));
10604 if (flags & EF_ARM_SYMSARESORTED)
10605 fprintf (file, _(" [sorted symbol table]"));
10607 fprintf (file, _(" [unsorted symbol table]"));
10609 flags &= ~ EF_ARM_SYMSARESORTED;
10612 case EF_ARM_EABI_VER2:
10613 fprintf (file, _(" [Version2 EABI]"));
10615 if (flags & EF_ARM_SYMSARESORTED)
10616 fprintf (file, _(" [sorted symbol table]"));
10618 fprintf (file, _(" [unsorted symbol table]"));
10620 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10621 fprintf (file, _(" [dynamic symbols use segment index]"));
10623 if (flags & EF_ARM_MAPSYMSFIRST)
10624 fprintf (file, _(" [mapping symbols precede others]"));
10626 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10627 | EF_ARM_MAPSYMSFIRST);
10630 case EF_ARM_EABI_VER3:
10631 fprintf (file, _(" [Version3 EABI]"));
10634 case EF_ARM_EABI_VER4:
10635 fprintf (file, _(" [Version4 EABI]"));
10638 case EF_ARM_EABI_VER5:
10639 fprintf (file, _(" [Version5 EABI]"));
10641 if (flags & EF_ARM_BE8)
10642 fprintf (file, _(" [BE8]"));
10644 if (flags & EF_ARM_LE8)
10645 fprintf (file, _(" [LE8]"));
10647 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10651 fprintf (file, _(" <EABI version unrecognised>"));
10655 flags &= ~ EF_ARM_EABIMASK;
10657 if (flags & EF_ARM_RELEXEC)
10658 fprintf (file, _(" [relocatable executable]"));
10660 if (flags & EF_ARM_HASENTRY)
10661 fprintf (file, _(" [has entry point]"));
10663 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10666 fprintf (file, _("<Unrecognised flag bits set>"));
10668 fputc ('\n', file);
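/* A typical line produced above for an EABI v5, BE8 object would be,
   assuming EF_ARM_EABI_VER5 == 0x05000000 and EF_ARM_BE8 == 0x00800000:

       private flags = 5800000: [Version5 EABI] [BE8]

   with any bits still set after the known flags have been masked off
   reported as unrecognised. */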
10674 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10676 switch (ELF_ST_TYPE (elf_sym->st_info))
10678 case STT_ARM_TFUNC:
10679 return ELF_ST_TYPE (elf_sym->st_info);
10681 case STT_ARM_16BIT:
10682 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10683 This allows us to distinguish between data used by Thumb instructions
10684 and non-data (which is probably code) inside Thumb regions of an
10686 if (type != STT_OBJECT && type != STT_TLS)
10687 return ELF_ST_TYPE (elf_sym->st_info);
10698 elf32_arm_gc_mark_hook (asection *sec,
10699 struct bfd_link_info *info,
10700 Elf_Internal_Rela *rel,
10701 struct elf_link_hash_entry *h,
10702 Elf_Internal_Sym *sym)
10705 switch (ELF32_R_TYPE (rel->r_info))
10707 case R_ARM_GNU_VTINHERIT:
10708 case R_ARM_GNU_VTENTRY:
10712 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10715 /* Update the got entry reference counts for the section being removed. */
10718 elf32_arm_gc_sweep_hook (bfd * abfd,
10719 struct bfd_link_info * info,
10721 const Elf_Internal_Rela * relocs)
10723 Elf_Internal_Shdr *symtab_hdr;
10724 struct elf_link_hash_entry **sym_hashes;
10725 bfd_signed_vma *local_got_refcounts;
10726 const Elf_Internal_Rela *rel, *relend;
10727 struct elf32_arm_link_hash_table * globals;
10729 if (info->relocatable)
10732 globals = elf32_arm_hash_table (info);
10733 if (globals == NULL)
10736 elf_section_data (sec)->local_dynrel = NULL;
10738 symtab_hdr = & elf_symtab_hdr (abfd);
10739 sym_hashes = elf_sym_hashes (abfd);
10740 local_got_refcounts = elf_local_got_refcounts (abfd);
10742 check_use_blx (globals);
10744 relend = relocs + sec->reloc_count;
10745 for (rel = relocs; rel < relend; rel++)
10747 unsigned long r_symndx;
10748 struct elf_link_hash_entry *h = NULL;
10751 r_symndx = ELF32_R_SYM (rel->r_info);
10752 if (r_symndx >= symtab_hdr->sh_info)
10754 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10755 while (h->root.type == bfd_link_hash_indirect
10756 || h->root.type == bfd_link_hash_warning)
10757 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10760 r_type = ELF32_R_TYPE (rel->r_info);
10761 r_type = arm_real_reloc_type (globals, r_type);
10765 case R_ARM_GOT_PREL:
10766 case R_ARM_TLS_GD32:
10767 case R_ARM_TLS_IE32:
10770 if (h->got.refcount > 0)
10771 h->got.refcount -= 1;
10773 else if (local_got_refcounts != NULL)
10775 if (local_got_refcounts[r_symndx] > 0)
10776 local_got_refcounts[r_symndx] -= 1;
10780 case R_ARM_TLS_LDM32:
10781 globals->tls_ldm_got.refcount -= 1;
10785 case R_ARM_ABS32_NOI:
10787 case R_ARM_REL32_NOI:
10793 case R_ARM_THM_CALL:
10794 case R_ARM_THM_JUMP24:
10795 case R_ARM_THM_JUMP19:
10796 case R_ARM_MOVW_ABS_NC:
10797 case R_ARM_MOVT_ABS:
10798 case R_ARM_MOVW_PREL_NC:
10799 case R_ARM_MOVT_PREL:
10800 case R_ARM_THM_MOVW_ABS_NC:
10801 case R_ARM_THM_MOVT_ABS:
10802 case R_ARM_THM_MOVW_PREL_NC:
10803 case R_ARM_THM_MOVT_PREL:
10804 /* Should the interworking branches be here also? */
10808 struct elf32_arm_link_hash_entry *eh;
10809 struct elf32_arm_relocs_copied **pp;
10810 struct elf32_arm_relocs_copied *p;
10812 eh = (struct elf32_arm_link_hash_entry *) h;
10814 if (h->plt.refcount > 0)
10816 h->plt.refcount -= 1;
10817 if (r_type == R_ARM_THM_CALL)
10818 eh->plt_maybe_thumb_refcount--;
10820 if (r_type == R_ARM_THM_JUMP24
10821 || r_type == R_ARM_THM_JUMP19)
10822 eh->plt_thumb_refcount--;
10825 if (r_type == R_ARM_ABS32
10826 || r_type == R_ARM_REL32
10827 || r_type == R_ARM_ABS32_NOI
10828 || r_type == R_ARM_REL32_NOI)
10830 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10832 if (p->section == sec)
10835 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10836 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10854 /* Look through the relocs for a section during the first phase. */
10857 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10858 asection *sec, const Elf_Internal_Rela *relocs)
10860 Elf_Internal_Shdr *symtab_hdr;
10861 struct elf_link_hash_entry **sym_hashes;
10862 const Elf_Internal_Rela *rel;
10863 const Elf_Internal_Rela *rel_end;
10866 struct elf32_arm_link_hash_table *htab;
10867 bfd_boolean needs_plt;
10868 unsigned long nsyms;
10870 if (info->relocatable)
10873 BFD_ASSERT (is_arm_elf (abfd));
10875 htab = elf32_arm_hash_table (info);
10881 /* Create dynamic sections for relocatable executables so that we can
10882 copy relocations. */
10883 if (htab->root.is_relocatable_executable
10884 && ! htab->root.dynamic_sections_created)
10886 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10890 dynobj = elf_hash_table (info)->dynobj;
10891 symtab_hdr = & elf_symtab_hdr (abfd);
10892 sym_hashes = elf_sym_hashes (abfd);
10893 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10895 rel_end = relocs + sec->reloc_count;
10896 for (rel = relocs; rel < rel_end; rel++)
10898 struct elf_link_hash_entry *h;
10899 struct elf32_arm_link_hash_entry *eh;
10900 unsigned long r_symndx;
10903 r_symndx = ELF32_R_SYM (rel->r_info);
10904 r_type = ELF32_R_TYPE (rel->r_info);
10905 r_type = arm_real_reloc_type (htab, r_type);
10907 if (r_symndx >= nsyms
10908 /* PR 9934: It is possible to have relocations that do not
10909 refer to symbols, thus it is also possible to have an
10910 object file containing relocations but no symbol table. */
10911 && (r_symndx > STN_UNDEF || nsyms > 0))
10913 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10918 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10922 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10923 while (h->root.type == bfd_link_hash_indirect
10924 || h->root.type == bfd_link_hash_warning)
10925 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10928 eh = (struct elf32_arm_link_hash_entry *) h;
10933 case R_ARM_GOT_PREL:
10934 case R_ARM_TLS_GD32:
10935 case R_ARM_TLS_IE32:
10936 /* This symbol requires a global offset table entry. */
10938 int tls_type, old_tls_type;
10942 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10943 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10944 default: tls_type = GOT_NORMAL; break;
10950 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10954 bfd_signed_vma *local_got_refcounts;
10956 /* This is a global offset table entry for a local symbol. */
10957 local_got_refcounts = elf_local_got_refcounts (abfd);
10958 if (local_got_refcounts == NULL)
10960 bfd_size_type size;
10962 size = symtab_hdr->sh_info;
10963 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10964 local_got_refcounts = (bfd_signed_vma *)
10965 bfd_zalloc (abfd, size);
10966 if (local_got_refcounts == NULL)
10968 elf_local_got_refcounts (abfd) = local_got_refcounts;
10969 elf32_arm_local_got_tls_type (abfd)
10970 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10972 local_got_refcounts[r_symndx] += 1;
10973 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10976 /* We will already have issued an error message if there is a
10977 TLS / non-TLS mismatch, based on the symbol type. We don't
10978 support any linker relaxations. So just combine any TLS
10980 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10981 && tls_type != GOT_NORMAL)
10982 tls_type |= old_tls_type;
10984 if (old_tls_type != tls_type)
10987 elf32_arm_hash_entry (h)->tls_type = tls_type;
10989 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10992 /* Fall through. */
10994 case R_ARM_TLS_LDM32:
10995 if (r_type == R_ARM_TLS_LDM32)
10996 htab->tls_ldm_got.refcount++;
10997 /* Fall through. */
10999 case R_ARM_GOTOFF32:
11001 if (htab->sgot == NULL)
11003 if (htab->root.dynobj == NULL)
11004 htab->root.dynobj = abfd;
11005 if (!create_got_section (htab->root.dynobj, info))
11011 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
11012 ldr __GOTT_INDEX__ offsets. */
11013 if (!htab->vxworks_p)
11015 /* Fall through. */
11022 case R_ARM_THM_CALL:
11023 case R_ARM_THM_JUMP24:
11024 case R_ARM_THM_JUMP19:
11028 case R_ARM_MOVW_ABS_NC:
11029 case R_ARM_MOVT_ABS:
11030 case R_ARM_THM_MOVW_ABS_NC:
11031 case R_ARM_THM_MOVT_ABS:
11034 (*_bfd_error_handler)
11035 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
11036 abfd, elf32_arm_howto_table_1[r_type].name,
11037 (h) ? h->root.root.string : "a local symbol");
11038 bfd_set_error (bfd_error_bad_value);
11042 /* Fall through. */
11044 case R_ARM_ABS32_NOI:
11046 case R_ARM_REL32_NOI:
11047 case R_ARM_MOVW_PREL_NC:
11048 case R_ARM_MOVT_PREL:
11049 case R_ARM_THM_MOVW_PREL_NC:
11050 case R_ARM_THM_MOVT_PREL:
11054 /* Should the interworking branches be listed here? */
11057 /* If this reloc is in a read-only section, we might
11058 need a copy reloc. We can't check reliably at this
11059 stage whether the section is read-only, as input
11060 sections have not yet been mapped to output sections.
11061 Tentatively set the flag for now, and correct in
11062 adjust_dynamic_symbol. */
11064 h->non_got_ref = 1;
11066 /* We may need a .plt entry if the function this reloc
11067 refers to is in a different object. We can't tell for
11068 sure yet, because something later might force the
11073 /* If we create a PLT entry, this relocation will reference
11074 it, even if it's an ABS32 relocation. */
11075 h->plt.refcount += 1;
11077 /* It's too early to use htab->use_blx here, so we have to
11078 record possible blx references separately from
11079 relocs that definitely need a thumb stub. */
11081 if (r_type == R_ARM_THM_CALL)
11082 eh->plt_maybe_thumb_refcount += 1;
11084 if (r_type == R_ARM_THM_JUMP24
11085 || r_type == R_ARM_THM_JUMP19)
11086 eh->plt_thumb_refcount += 1;
11089 /* If we are creating a shared library or relocatable executable,
11090 and this is a reloc against a global symbol, or a non PC
11091 relative reloc against a local symbol, then we need to copy
11092 the reloc into the shared library. However, if we are linking
11093 with -Bsymbolic, we do not need to copy a reloc against a
11094 global symbol which is defined in an object we are
11095 including in the link (i.e., DEF_REGULAR is set). At
11096 this point we have not seen all the input files, so it is
11097 possible that DEF_REGULAR is not set now but will be set
11098 later (it is never cleared). We account for that
11099 possibility below by storing information in the
11100 relocs_copied field of the hash table entry. */
11101 if ((info->shared || htab->root.is_relocatable_executable)
11102 && (sec->flags & SEC_ALLOC) != 0
11103 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
11104 || (h != NULL && ! h->needs_plt
11105 && (! info->symbolic || ! h->def_regular))))
11107 struct elf32_arm_relocs_copied *p, **head;
11109 /* When creating a shared object, we must copy these
11110 reloc types into the output file. We create a reloc
11111 section in dynobj and make room for this reloc. */
11112 if (sreloc == NULL)
11114 sreloc = _bfd_elf_make_dynamic_reloc_section
11115 (sec, dynobj, 2, abfd, ! htab->use_rel);
11117 if (sreloc == NULL)
11120 /* BPABI objects never have dynamic relocations mapped. */
11121 if (htab->symbian_p)
11125 flags = bfd_get_section_flags (dynobj, sreloc);
11126 flags &= ~(SEC_LOAD | SEC_ALLOC);
11127 bfd_set_section_flags (dynobj, sreloc, flags);
11131 /* If this is a global symbol, we count the number of
11132 relocations we need for this symbol. */
11135 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
11139 /* Track dynamic relocs needed for local syms too.
11140 We really need local syms available to do this
11141 easily. Oh well. */
11144 Elf_Internal_Sym *isym;
11146 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
11151 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
11155 vpp = &elf_section_data (s)->local_dynrel;
11156 head = (struct elf32_arm_relocs_copied **) vpp;
11160 if (p == NULL || p->section != sec)
11162 bfd_size_type amt = sizeof *p;
11164 p = (struct elf32_arm_relocs_copied *)
11165 bfd_alloc (htab->root.dynobj, amt);
11175 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
11181 /* This relocation describes the C++ object vtable hierarchy.
11182 Reconstruct it for later use during GC. */
11183 case R_ARM_GNU_VTINHERIT:
11184 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
11188 /* This relocation describes which C++ vtable entries are actually
11189 used. Record for later use during GC. */
11190 case R_ARM_GNU_VTENTRY:
11191 BFD_ASSERT (h != NULL);
11193 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
11202 /* Unwinding tables are not referenced directly. This pass marks them as
11203 required if the corresponding code section is marked. */
11206 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
11207 elf_gc_mark_hook_fn gc_mark_hook)
11210 Elf_Internal_Shdr **elf_shdrp;
11213 /* Marking EH data may cause additional code sections to be marked,
11214 requiring multiple passes. */
11219 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11223 if (! is_arm_elf (sub))
11226 elf_shdrp = elf_elfsections (sub);
11227 for (o = sub->sections; o != NULL; o = o->next)
11229 Elf_Internal_Shdr *hdr;
11231 hdr = &elf_section_data (o)->this_hdr;
11232 if (hdr->sh_type == SHT_ARM_EXIDX
11234 && hdr->sh_link < elf_numsections (sub)
11236 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11239 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11249 /* Treat mapping symbols as special target symbols. */
11252 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11254 return bfd_is_arm_special_symbol_name (sym->name,
11255 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11258 /* This is a copy of elf_find_function() from elf.c except that
11259 ARM mapping symbols are ignored when looking for function names
11260 and STT_ARM_TFUNC is considered to be a function type.  */
11263 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11264 asection * section,
11265 asymbol ** symbols,
11267 const char ** filename_ptr,
11268 const char ** functionname_ptr)
11270 const char * filename = NULL;
11271 asymbol * func = NULL;
11272 bfd_vma low_func = 0;
11275 for (p = symbols; *p != NULL; p++)
11277 elf_symbol_type *q;
11279 q = (elf_symbol_type *) *p;
11281 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11286 filename = bfd_asymbol_name (&q->symbol);
11289 case STT_ARM_TFUNC:
11291 /* Skip mapping symbols. */
11292 if ((q->symbol.flags & BSF_LOCAL)
11293 && bfd_is_arm_special_symbol_name (q->symbol.name,
11294 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11296 /* Fall through. */
11297 if (bfd_get_section (&q->symbol) == section
11298 && q->symbol.value >= low_func
11299 && q->symbol.value <= offset)
11301 func = (asymbol *) q;
11302 low_func = q->symbol.value;
11312 *filename_ptr = filename;
11313 if (functionname_ptr)
11314 *functionname_ptr = bfd_asymbol_name (func);
11320 /* Find the nearest line to a particular section and offset, for error
11321 reporting. This code is a duplicate of the code in elf.c, except
11322 that it uses arm_elf_find_function. */
11325 elf32_arm_find_nearest_line (bfd * abfd,
11326 asection * section,
11327 asymbol ** symbols,
11329 const char ** filename_ptr,
11330 const char ** functionname_ptr,
11331 unsigned int * line_ptr)
11333 bfd_boolean found = FALSE;
11335 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11337 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11338 filename_ptr, functionname_ptr,
11340 & elf_tdata (abfd)->dwarf2_find_line_info))
11342 if (!*functionname_ptr)
11343 arm_elf_find_function (abfd, section, symbols, offset,
11344 *filename_ptr ? NULL : filename_ptr,
11350 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11351 & found, filename_ptr,
11352 functionname_ptr, line_ptr,
11353 & elf_tdata (abfd)->line_info))
11356 if (found && (*functionname_ptr || *line_ptr))
11359 if (symbols == NULL)
11362 if (! arm_elf_find_function (abfd, section, symbols, offset,
11363 filename_ptr, functionname_ptr))
11371 elf32_arm_find_inliner_info (bfd * abfd,
11372 const char ** filename_ptr,
11373 const char ** functionname_ptr,
11374 unsigned int * line_ptr)
11377 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11378 functionname_ptr, line_ptr,
11379 & elf_tdata (abfd)->dwarf2_find_line_info);
11383 /* Adjust a symbol defined by a dynamic object and referenced by a
11384 regular object. The current definition is in some section of the
11385 dynamic object, but we're not including those sections. We have to
11386 change the definition to something the rest of the link can understand.  */
11390 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11391 struct elf_link_hash_entry * h)
11395 struct elf32_arm_link_hash_entry * eh;
11396 struct elf32_arm_link_hash_table *globals;
11398 globals = elf32_arm_hash_table (info);
11399 if (globals == NULL)
11402 dynobj = elf_hash_table (info)->dynobj;
11404 /* Make sure we know what is going on here. */
11405 BFD_ASSERT (dynobj != NULL
11407 || h->u.weakdef != NULL
11410 && !h->def_regular)));
11412 eh = (struct elf32_arm_link_hash_entry *) h;
11414 /* If this is a function, put it in the procedure linkage table. We
11415 will fill in the contents of the procedure linkage table later,
11416 when we know the address of the .got section. */
11417 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11420 if (h->plt.refcount <= 0
11421 || SYMBOL_CALLS_LOCAL (info, h)
11422 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11423 && h->root.type == bfd_link_hash_undefweak))
11425 /* This case can occur if we saw a PLT32 reloc in an input
11426 file, but the symbol was never referred to by a dynamic
11427 object, or if all references were garbage collected. In
11428 such a case, we don't actually need to build a procedure
11429 linkage table, and we can just do a PC24 reloc instead. */
11430 h->plt.offset = (bfd_vma) -1;
11431 eh->plt_thumb_refcount = 0;
11432 eh->plt_maybe_thumb_refcount = 0;
11440 /* It's possible that we incorrectly decided a .plt reloc was
11441 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11442 in check_relocs. We can't decide accurately between function
11443 and non-function syms in check_relocs; objects loaded later in
11444 the link may change h->type. So fix it now. */
11445 h->plt.offset = (bfd_vma) -1;
11446 eh->plt_thumb_refcount = 0;
11447 eh->plt_maybe_thumb_refcount = 0;
11450 /* If this is a weak symbol, and there is a real definition, the
11451 processor independent code will have arranged for us to see the
11452 real definition first, and we can just use the same value. */
11453 if (h->u.weakdef != NULL)
11455 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11456 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11457 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11458 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11462 /* If there are no non-GOT references, we do not need a copy reloc.  */
11464 if (!h->non_got_ref)
11467 /* This is a reference to a symbol defined by a dynamic object which
11468 is not a function. */
11470 /* If we are creating a shared library, we must presume that the
11471 only references to the symbol are via the global offset table.
11472 For such cases we need not do anything here; the relocations will
11473 be handled correctly by relocate_section. Relocatable executables
11474 can reference data in shared objects directly, so we don't need to
11475 do anything here. */
11476 if (info->shared || globals->root.is_relocatable_executable)
11481 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11482 h->root.root.string);
11486 /* We must allocate the symbol in our .dynbss section, which will
11487 become part of the .bss section of the executable. There will be
11488 an entry for this symbol in the .dynsym section. The dynamic
11489 object will contain position independent code, so all references
11490 from the dynamic object to this symbol will go through the global
11491 offset table. The dynamic linker will use the .dynsym entry to
11492 determine the address it must put in the global offset table, so
11493 both the dynamic object and the regular object will refer to the
11494 same memory location for the variable. */
11495 s = bfd_get_section_by_name (dynobj, ".dynbss");
11496 BFD_ASSERT (s != NULL);
11498 /* We must generate an R_ARM_COPY reloc to tell the dynamic linker to
11499 copy the initial value out of the dynamic object and into the
11500 runtime process image. We need to remember the offset into the
11501 .rel(a).bss section we are going to use. */
11502 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11506 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11507 BFD_ASSERT (srel != NULL);
11508 srel->size += RELOC_SIZE (globals);
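/* (Illustrative note.)  This only reserves room: one entry of
   RELOC_SIZE (globals) bytes per symbol that ends up needing a copy
   reloc.  The R_ARM_COPY relocation itself is written out later, in
   elf32_arm_finish_dynamic_symbol.  */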
11512 return _bfd_elf_adjust_dynamic_copy (h, s);
11515 /* Allocate space in .plt, .got and associated reloc sections for dynamic relocs.  */
11519 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11521 struct bfd_link_info *info;
11522 struct elf32_arm_link_hash_table *htab;
11523 struct elf32_arm_link_hash_entry *eh;
11524 struct elf32_arm_relocs_copied *p;
11525 bfd_signed_vma thumb_refs;
11527 eh = (struct elf32_arm_link_hash_entry *) h;
11529 if (h->root.type == bfd_link_hash_indirect)
11532 if (h->root.type == bfd_link_hash_warning)
11533 /* When warning symbols are created, they **replace** the "real"
11534 entry in the hash table, thus we never get to see the real
11535 symbol in a hash traversal. So look at it now. */
11536 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11538 info = (struct bfd_link_info *) inf;
11539 htab = elf32_arm_hash_table (info);
11543 if (htab->root.dynamic_sections_created
11544 && h->plt.refcount > 0)
11546 /* Make sure this symbol is output as a dynamic symbol.
11547 Undefined weak syms won't yet be marked as dynamic. */
11548 if (h->dynindx == -1
11549 && !h->forced_local)
11551 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11556 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11558 asection *s = htab->splt;
11560 /* If this is the first .plt entry, make room for the special first entry.  */
11563 s->size += htab->plt_header_size;
11565 h->plt.offset = s->size;
11567 /* If we will insert a Thumb trampoline before this PLT, leave room for it.  */
11569 thumb_refs = eh->plt_thumb_refcount;
11570 if (!htab->use_blx)
11571 thumb_refs += eh->plt_maybe_thumb_refcount;
11573 if (thumb_refs > 0)
11575 h->plt.offset += PLT_THUMB_STUB_SIZE;
11576 s->size += PLT_THUMB_STUB_SIZE;
11579 /* If this symbol is not defined in a regular file, and we are
11580 not generating a shared library, then set the symbol to this
11581 location in the .plt. This is required to make function
11582 pointers compare as equal between the normal executable and
11583 the shared library. */
11585 && !h->def_regular)
11587 h->root.u.def.section = s;
11588 h->root.u.def.value = h->plt.offset;
11590 /* Make sure the function is not marked as Thumb, in case
11591 it is the target of an ABS32 relocation, which will
11592 point to the PLT entry. */
11593 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11594 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11597 /* Make room for this entry. */
11598 s->size += htab->plt_entry_size;
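/* Rough sketch of the slot just sized, assuming the generic (non-VxWorks,
   non-Symbian) layout; the exact templates differ per target:

       [ optional Thumb-to-ARM stub, PLT_THUMB_STUB_SIZE bytes ]
       [ ARM PLT entry, htab->plt_entry_size bytes ]   <- h->plt.offset

   h->plt.offset records the start of the ARM part, so the Thumb stub,
   when present, is emitted immediately before that offset.  */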
11600 if (!htab->symbian_p)
11602 /* We also need to make an entry in the .got.plt section, which
11603 will be placed in the .got section by the linker script. */
11604 eh->plt_got_offset = htab->sgotplt->size;
11605 htab->sgotplt->size += 4;
11608 /* We also need to make an entry in the .rel(a).plt section. */
11609 htab->srelplt->size += RELOC_SIZE (htab);
11611 /* VxWorks executables have a second set of relocations for
11612 each PLT entry. They go in a separate relocation section,
11613 which is processed by the kernel loader. */
11614 if (htab->vxworks_p && !info->shared)
11616 /* There is a relocation for the initial PLT entry:
11617 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11618 if (h->plt.offset == htab->plt_header_size)
11619 htab->srelplt2->size += RELOC_SIZE (htab);
11621 /* There are two extra relocations for each subsequent
11622 PLT entry: an R_ARM_32 relocation for the GOT entry,
11623 and an R_ARM_32 relocation for the PLT entry. */
11624 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
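/* For illustration only: a VxWorks executable with three PLT entries ends
   up with 1 + 2 * 3 = 7 relocations in .rela.plt.unloaded -- one for the
   header's _GLOBAL_OFFSET_TABLE_ reference plus two per PLT entry.  */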
11629 h->plt.offset = (bfd_vma) -1;
11635 h->plt.offset = (bfd_vma) -1;
11639 if (h->got.refcount > 0)
11643 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11646 /* Make sure this symbol is output as a dynamic symbol.
11647 Undefined weak syms won't yet be marked as dynamic. */
11648 if (h->dynindx == -1
11649 && !h->forced_local)
11651 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11655 if (!htab->symbian_p)
11658 h->got.offset = s->size;
11660 if (tls_type == GOT_UNKNOWN)
11663 if (tls_type == GOT_NORMAL)
11664 /* Non-TLS symbols need one GOT slot. */
11668 if (tls_type & GOT_TLS_GD)
11669 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11671 if (tls_type & GOT_TLS_IE)
11672 /* R_ARM_TLS_IE32 needs one GOT slot. */
11676 dyn = htab->root.dynamic_sections_created;
11679 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11681 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11684 if (tls_type != GOT_NORMAL
11685 && (info->shared || indx != 0)
11686 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11687 || h->root.type != bfd_link_hash_undefweak))
11689 if (tls_type & GOT_TLS_IE)
11690 htab->srelgot->size += RELOC_SIZE (htab);
11692 if (tls_type & GOT_TLS_GD)
11693 htab->srelgot->size += RELOC_SIZE (htab);
11695 if ((tls_type & GOT_TLS_GD) && indx != 0)
11696 htab->srelgot->size += RELOC_SIZE (htab);
11698 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11699 || h->root.type != bfd_link_hash_undefweak)
11701 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11702 htab->srelgot->size += RELOC_SIZE (htab);
11706 h->got.offset = (bfd_vma) -1;
11708 /* Allocate stubs for exported Thumb functions on v4t. */
11709 if (!htab->use_blx && h->dynindx != -1
11711 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11712 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11714 struct elf_link_hash_entry * th;
11715 struct bfd_link_hash_entry * bh;
11716 struct elf_link_hash_entry * myh;
11720 /* Create a new symbol to register the real location of the function.  */
11721 s = h->root.u.def.section;
11722 sprintf (name, "__real_%s", h->root.root.string);
11723 _bfd_generic_link_add_one_symbol (info, s->owner,
11724 name, BSF_GLOBAL, s,
11725 h->root.u.def.value,
11726 NULL, TRUE, FALSE, &bh);
11728 myh = (struct elf_link_hash_entry *) bh;
11729 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11730 myh->forced_local = 1;
11731 eh->export_glue = myh;
11732 th = record_arm_to_thumb_glue (info, h);
11733 /* Point the symbol at the stub. */
11734 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11735 h->root.u.def.section = th->root.u.def.section;
11736 h->root.u.def.value = th->root.u.def.value & ~1;
11739 if (eh->relocs_copied == NULL)
11742 /* In the shared -Bsymbolic case, discard space allocated for
11743 dynamic pc-relative relocs against symbols which turn out to be
11744 defined in regular objects. For the normal shared case, discard
11745 space for pc-relative relocs that have become local due to symbol
11746 visibility changes. */
11748 if (info->shared || htab->root.is_relocatable_executable)
11750 /* The only relocs that use pc_count are R_ARM_REL32 and
11751 R_ARM_REL32_NOI, which will appear on something like
11752 ".long foo - .". We want calls to protected symbols to resolve
11753 directly to the function rather than going via the plt. If people
11754 want function pointer comparisons to work as expected then they
11755 should avoid writing assembly like ".long foo - .". */
11756 if (SYMBOL_CALLS_LOCAL (info, h))
11758 struct elf32_arm_relocs_copied **pp;
11760 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11762 p->count -= p->pc_count;
11771 if (htab->vxworks_p)
11773 struct elf32_arm_relocs_copied **pp;
11775 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11777 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11784 /* Also discard relocs on undefined weak syms with non-default visibility.  */
11786 if (eh->relocs_copied != NULL
11787 && h->root.type == bfd_link_hash_undefweak)
11789 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11790 eh->relocs_copied = NULL;
11792 /* Make sure undefined weak symbols are output as a dynamic symbol in PLTs.  */
11794 else if (h->dynindx == -1
11795 && !h->forced_local)
11797 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11802 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11803 && h->root.type == bfd_link_hash_new)
11805 /* Output absolute symbols so that we can create relocations
11806 against them. For normal symbols we output a relocation
11807 against the section that contains them. */
11808 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11815 /* For the non-shared case, discard space for relocs against
11816 symbols which turn out to need copy relocs or are not dynamic.  */
11819 if (!h->non_got_ref
11820 && ((h->def_dynamic
11821 && !h->def_regular)
11822 || (htab->root.dynamic_sections_created
11823 && (h->root.type == bfd_link_hash_undefweak
11824 || h->root.type == bfd_link_hash_undefined))))
11826 /* Make sure this symbol is output as a dynamic symbol.
11827 Undefined weak syms won't yet be marked as dynamic. */
11828 if (h->dynindx == -1
11829 && !h->forced_local)
11831 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11835 /* If that succeeded, we know we'll be keeping all the relocs.  */
11837 if (h->dynindx != -1)
11841 eh->relocs_copied = NULL;
11846 /* Finally, allocate space. */
11847 for (p = eh->relocs_copied; p != NULL; p = p->next)
11849 asection *sreloc = elf_section_data (p->section)->sreloc;
11850 sreloc->size += p->count * RELOC_SIZE (htab);
11856 /* Find any dynamic relocs that apply to read-only sections. */
11859 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11861 struct elf32_arm_link_hash_entry * eh;
11862 struct elf32_arm_relocs_copied * p;
11864 if (h->root.type == bfd_link_hash_warning)
11865 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11867 eh = (struct elf32_arm_link_hash_entry *) h;
11868 for (p = eh->relocs_copied; p != NULL; p = p->next)
11870 asection *s = p->section;
11872 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11874 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11876 info->flags |= DF_TEXTREL;
11878 /* Not an error, just cut short the traversal. */
11886 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11889 struct elf32_arm_link_hash_table *globals;
11891 globals = elf32_arm_hash_table (info);
11892 if (globals == NULL)
11895 globals->byteswap_code = byteswap_code;
11898 /* Set the sizes of the dynamic sections. */
11901 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11902 struct bfd_link_info * info)
11907 bfd_boolean relocs;
11909 struct elf32_arm_link_hash_table *htab;
11911 htab = elf32_arm_hash_table (info);
11915 dynobj = elf_hash_table (info)->dynobj;
11916 BFD_ASSERT (dynobj != NULL);
11917 check_use_blx (htab);
11919 if (elf_hash_table (info)->dynamic_sections_created)
11921 /* Set the contents of the .interp section to the interpreter. */
11922 if (info->executable)
11924 s = bfd_get_section_by_name (dynobj, ".interp");
11925 BFD_ASSERT (s != NULL);
11926 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11927 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11931 /* Set up .got offsets for local syms, and space for local dynamic relocs.  */
11933 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11935 bfd_signed_vma *local_got;
11936 bfd_signed_vma *end_local_got;
11937 char *local_tls_type;
11938 bfd_size_type locsymcount;
11939 Elf_Internal_Shdr *symtab_hdr;
11941 bfd_boolean is_vxworks = htab->vxworks_p;
11943 if (! is_arm_elf (ibfd))
11946 for (s = ibfd->sections; s != NULL; s = s->next)
11948 struct elf32_arm_relocs_copied *p;
11950 for (p = (struct elf32_arm_relocs_copied *)
11951 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11953 if (!bfd_is_abs_section (p->section)
11954 && bfd_is_abs_section (p->section->output_section))
11956 /* Input section has been discarded, either because
11957 it is a copy of a linkonce section or due to
11958 linker script /DISCARD/, so we'll be discarding the relocations too.  */
11961 else if (is_vxworks
11962 && strcmp (p->section->output_section->name,
11965 /* Relocations in vxworks .tls_vars sections are
11966 handled specially by the loader. */
11968 else if (p->count != 0)
11970 srel = elf_section_data (p->section)->sreloc;
11971 srel->size += p->count * RELOC_SIZE (htab);
11972 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11973 info->flags |= DF_TEXTREL;
11978 local_got = elf_local_got_refcounts (ibfd);
11982 symtab_hdr = & elf_symtab_hdr (ibfd);
11983 locsymcount = symtab_hdr->sh_info;
11984 end_local_got = local_got + locsymcount;
11985 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11987 srel = htab->srelgot;
11988 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11990 if (*local_got > 0)
11992 *local_got = s->size;
11993 if (*local_tls_type & GOT_TLS_GD)
11994 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11996 if (*local_tls_type & GOT_TLS_IE)
11998 if (*local_tls_type == GOT_NORMAL)
12001 if (info->shared || *local_tls_type == GOT_TLS_GD)
12002 srel->size += RELOC_SIZE (htab);
12005 *local_got = (bfd_vma) -1;
12009 if (htab->tls_ldm_got.refcount > 0)
12011 /* Allocate two GOT entries and one dynamic relocation (if necessary)
12012 for R_ARM_TLS_LDM32 relocations. */
12013 htab->tls_ldm_got.offset = htab->sgot->size;
12014 htab->sgot->size += 8;
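/* (Only a summary of the usual GNU TLS convention.)  The 8 bytes are two
   GOT words forming the (module index, offset) pair for the local-dynamic
   model; the offset word stays zero, so at most the module-index word
   needs the dynamic relocation reserved below.  */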
12016 htab->srelgot->size += RELOC_SIZE (htab);
12019 htab->tls_ldm_got.offset = -1;
12021 /* Allocate global sym .plt and .got entries, and space for global
12022 sym dynamic relocs. */
12023 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
12025 /* Here we rummage through the found bfds to collect glue information. */
12026 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
12028 if (! is_arm_elf (ibfd))
12031 /* Initialise mapping tables for code/data. */
12032 bfd_elf32_arm_init_maps (ibfd);
12034 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
12035 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
12036 /* xgettext:c-format */
12037 _bfd_error_handler (_("Errors encountered processing file %s"),
12041 /* Allocate space for the glue sections now that we've sized them. */
12042 bfd_elf32_arm_allocate_interworking_sections (info);
12044 /* The check_relocs and adjust_dynamic_symbol entry points have
12045 determined the sizes of the various dynamic sections. Allocate
12046 memory for them. */
12049 for (s = dynobj->sections; s != NULL; s = s->next)
12053 if ((s->flags & SEC_LINKER_CREATED) == 0)
12056 /* It's OK to base decisions on the section name, because none
12057 of the dynobj section names depend upon the input files. */
12058 name = bfd_get_section_name (dynobj, s);
12060 if (strcmp (name, ".plt") == 0)
12062 /* Remember whether there is a PLT. */
12063 plt = s->size != 0;
12065 else if (CONST_STRNEQ (name, ".rel"))
12069 /* Remember whether there are any reloc sections other
12070 than .rel(a).plt and .rela.plt.unloaded. */
12071 if (s != htab->srelplt && s != htab->srelplt2)
12074 /* We use the reloc_count field as a counter if we need
12075 to copy relocs into the output file. */
12076 s->reloc_count = 0;
12079 else if (! CONST_STRNEQ (name, ".got")
12080 && strcmp (name, ".dynbss") != 0)
12082 /* It's not one of our sections, so don't allocate space. */
12088 /* If we don't need this section, strip it from the
12089 output file. This is mostly to handle .rel(a).bss and
12090 .rel(a).plt. We must create both sections in
12091 create_dynamic_sections, because they must be created
12092 before the linker maps input sections to output
12093 sections. The linker does that before
12094 adjust_dynamic_symbol is called, and it is that
12095 function which decides whether anything needs to go
12096 into these sections. */
12097 s->flags |= SEC_EXCLUDE;
12101 if ((s->flags & SEC_HAS_CONTENTS) == 0)
12104 /* Allocate memory for the section contents. */
12105 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
12106 if (s->contents == NULL)
12110 if (elf_hash_table (info)->dynamic_sections_created)
12112 /* Add some entries to the .dynamic section. We fill in the
12113 values later, in elf32_arm_finish_dynamic_sections, but we
12114 must add the entries now so that we get the correct size for
12115 the .dynamic section. The DT_DEBUG entry is filled in by the
12116 dynamic linker and used by the debugger. */
12117 #define add_dynamic_entry(TAG, VAL) \
12118 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
12120 if (info->executable)
12122 if (!add_dynamic_entry (DT_DEBUG, 0))
12128 if ( !add_dynamic_entry (DT_PLTGOT, 0)
12129 || !add_dynamic_entry (DT_PLTRELSZ, 0)
12130 || !add_dynamic_entry (DT_PLTREL,
12131 htab->use_rel ? DT_REL : DT_RELA)
12132 || !add_dynamic_entry (DT_JMPREL, 0))
12140 if (!add_dynamic_entry (DT_REL, 0)
12141 || !add_dynamic_entry (DT_RELSZ, 0)
12142 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
12147 if (!add_dynamic_entry (DT_RELA, 0)
12148 || !add_dynamic_entry (DT_RELASZ, 0)
12149 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
12154 /* If any dynamic relocs apply to a read-only section,
12155 then we need a DT_TEXTREL entry. */
12156 if ((info->flags & DF_TEXTREL) == 0)
12157 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
12160 if ((info->flags & DF_TEXTREL) != 0)
12162 if (!add_dynamic_entry (DT_TEXTREL, 0))
12165 if (htab->vxworks_p
12166 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
12169 #undef add_dynamic_entry
12174 /* Finish up dynamic symbol handling. We set the contents of various
12175 dynamic sections here. */
12178 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
12179 struct bfd_link_info * info,
12180 struct elf_link_hash_entry * h,
12181 Elf_Internal_Sym * sym)
12184 struct elf32_arm_link_hash_table *htab;
12185 struct elf32_arm_link_hash_entry *eh;
12187 dynobj = elf_hash_table (info)->dynobj;
12188 htab = elf32_arm_hash_table (info);
12192 eh = (struct elf32_arm_link_hash_entry *) h;
12194 if (h->plt.offset != (bfd_vma) -1)
12200 Elf_Internal_Rela rel;
12202 /* This symbol has an entry in the procedure linkage table.  Set it up.  */
12205 BFD_ASSERT (h->dynindx != -1);
12207 splt = bfd_get_section_by_name (dynobj, ".plt");
12208 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
12209 BFD_ASSERT (splt != NULL && srel != NULL);
12211 /* Fill in the entry in the procedure linkage table. */
12212 if (htab->symbian_p)
12214 put_arm_insn (htab, output_bfd,
12215 elf32_arm_symbian_plt_entry[0],
12216 splt->contents + h->plt.offset);
12217 bfd_put_32 (output_bfd,
12218 elf32_arm_symbian_plt_entry[1],
12219 splt->contents + h->plt.offset + 4);
12221 /* Fill in the entry in the .rel.plt section. */
12222 rel.r_offset = (splt->output_section->vma
12223 + splt->output_offset
12224 + h->plt.offset + 4);
12225 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12227 /* Get the index in the procedure linkage table which
12228 corresponds to this symbol. This is the index of this symbol
12229 in all the symbols for which we are making plt entries. The
12230 first entry in the procedure linkage table is reserved. */
12231 plt_index = ((h->plt.offset - htab->plt_header_size)
12232 / htab->plt_entry_size);
12236 bfd_vma got_offset, got_address, plt_address;
12237 bfd_vma got_displacement;
12241 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12242 BFD_ASSERT (sgot != NULL);
12244 /* Get the offset into the .got.plt table of the entry that
12245 corresponds to this function. */
12246 got_offset = eh->plt_got_offset;
12248 /* Get the index in the procedure linkage table which
12249 corresponds to this symbol. This is the index of this symbol
12250 in all the symbols for which we are making plt entries. The
12251 first three entries in .got.plt are reserved; after that
12252 symbols appear in the same order as in .plt. */
12253 plt_index = (got_offset - 12) / 4;
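/* Example (illustrative values): the three reserved words occupy the first
   12 bytes of .got.plt, so got_offset == 12 maps to plt_index == 0,
   got_offset == 16 to plt_index == 1, and so on.  */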
12255 /* Calculate the address of the GOT entry. */
12256 got_address = (sgot->output_section->vma
12257 + sgot->output_offset
12260 /* ...and the address of the PLT entry. */
12261 plt_address = (splt->output_section->vma
12262 + splt->output_offset
12265 ptr = htab->splt->contents + h->plt.offset;
12266 if (htab->vxworks_p && info->shared)
12271 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12273 val = elf32_arm_vxworks_shared_plt_entry[i];
12275 val |= got_address - sgot->output_section->vma;
12277 val |= plt_index * RELOC_SIZE (htab);
12278 if (i == 2 || i == 5)
12279 bfd_put_32 (output_bfd, val, ptr);
12281 put_arm_insn (htab, output_bfd, val, ptr);
12284 else if (htab->vxworks_p)
12289 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12291 val = elf32_arm_vxworks_exec_plt_entry[i];
12293 val |= got_address;
12295 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12297 val |= plt_index * RELOC_SIZE (htab);
12298 if (i == 2 || i == 5)
12299 bfd_put_32 (output_bfd, val, ptr);
12301 put_arm_insn (htab, output_bfd, val, ptr);
12304 loc = (htab->srelplt2->contents
12305 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12307 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12308 referencing the GOT for this PLT entry. */
12309 rel.r_offset = plt_address + 8;
12310 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12311 rel.r_addend = got_offset;
12312 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12313 loc += RELOC_SIZE (htab);
12315 /* Create the R_ARM_ABS32 relocation referencing the
12316 beginning of the PLT for this GOT entry. */
12317 rel.r_offset = got_address;
12318 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12320 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12324 bfd_signed_vma thumb_refs;
12325 /* Calculate the displacement between the PLT slot and the
12326 entry in the GOT. The eight-byte offset accounts for the
12327 value produced by adding to pc in the first instruction
12328 of the PLT stub. */
12329 got_displacement = got_address - (plt_address + 8);
12331 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
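/* Sketch of how the displacement is folded into the PLT instructions
   emitted below, using the arbitrary example value
   got_displacement == 0x00123456:

       (0x00123456 & 0x0ff00000) >> 20  == 0x001   -> first  insn
       (0x00123456 & 0x000ff000) >> 12  == 0x023   -> second insn
        0x00123456 & 0x00000fff         == 0x456   -> third  insn  */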
12333 thumb_refs = eh->plt_thumb_refcount;
12334 if (!htab->use_blx)
12335 thumb_refs += eh->plt_maybe_thumb_refcount;
12337 if (thumb_refs > 0)
12339 put_thumb_insn (htab, output_bfd,
12340 elf32_arm_plt_thumb_stub[0], ptr - 4);
12341 put_thumb_insn (htab, output_bfd,
12342 elf32_arm_plt_thumb_stub[1], ptr - 2);
12345 put_arm_insn (htab, output_bfd,
12346 elf32_arm_plt_entry[0]
12347 | ((got_displacement & 0x0ff00000) >> 20),
12349 put_arm_insn (htab, output_bfd,
12350 elf32_arm_plt_entry[1]
12351 | ((got_displacement & 0x000ff000) >> 12),
12353 put_arm_insn (htab, output_bfd,
12354 elf32_arm_plt_entry[2]
12355 | (got_displacement & 0x00000fff),
12357 #ifdef FOUR_WORD_PLT
12358 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12362 /* Fill in the entry in the global offset table. */
12363 bfd_put_32 (output_bfd,
12364 (splt->output_section->vma
12365 + splt->output_offset),
12366 sgot->contents + got_offset);
12368 /* Fill in the entry in the .rel(a).plt section. */
12370 rel.r_offset = got_address;
12371 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12374 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12375 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12377 if (!h->def_regular)
12379 /* Mark the symbol as undefined, rather than as defined in
12380 the .plt section. Leave the value alone. */
12381 sym->st_shndx = SHN_UNDEF;
12382 /* If the symbol is weak, we do need to clear the value.
12383 Otherwise, the PLT entry would provide a definition for
12384 the symbol even if the symbol wasn't defined anywhere,
12385 and so the symbol would never be NULL. */
12386 if (!h->ref_regular_nonweak)
12391 if (h->got.offset != (bfd_vma) -1
12392 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12393 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12397 Elf_Internal_Rela rel;
12401 /* This symbol has an entry in the global offset table.  Set it up.  */
12403 sgot = bfd_get_section_by_name (dynobj, ".got");
12404 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12405 BFD_ASSERT (sgot != NULL && srel != NULL);
12407 offset = (h->got.offset & ~(bfd_vma) 1);
12409 rel.r_offset = (sgot->output_section->vma
12410 + sgot->output_offset
12413 /* If this is a static link, or it is a -Bsymbolic link and the
12414 symbol is defined locally or was forced to be local because
12415 of a version file, we just want to emit a RELATIVE reloc.
12416 The entry in the global offset table will already have been
12417 initialized in the relocate_section function. */
12419 && SYMBOL_REFERENCES_LOCAL (info, h))
12421 BFD_ASSERT ((h->got.offset & 1) != 0);
12422 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12423 if (!htab->use_rel)
12425 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12426 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12431 BFD_ASSERT ((h->got.offset & 1) == 0);
12432 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12433 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12436 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12437 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12443 Elf_Internal_Rela rel;
12446 /* This symbol needs a copy reloc. Set it up. */
12447 BFD_ASSERT (h->dynindx != -1
12448 && (h->root.type == bfd_link_hash_defined
12449 || h->root.type == bfd_link_hash_defweak));
12451 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12452 RELOC_SECTION (htab, ".bss"));
12453 BFD_ASSERT (s != NULL);
12456 rel.r_offset = (h->root.u.def.value
12457 + h->root.u.def.section->output_section->vma
12458 + h->root.u.def.section->output_offset);
12459 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12460 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12461 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12464 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12465 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12466 to the ".got" section. */
12467 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12468 || (!htab->vxworks_p && h == htab->root.hgot))
12469 sym->st_shndx = SHN_ABS;
12474 /* Finish up the dynamic sections. */
12477 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12482 struct elf32_arm_link_hash_table *htab;
12484 htab = elf32_arm_hash_table (info);
12488 dynobj = elf_hash_table (info)->dynobj;
12490 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12491 BFD_ASSERT (htab->symbian_p || sgot != NULL);
12492 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12494 if (elf_hash_table (info)->dynamic_sections_created)
12497 Elf32_External_Dyn *dyncon, *dynconend;
12499 splt = bfd_get_section_by_name (dynobj, ".plt");
12500 BFD_ASSERT (splt != NULL && sdyn != NULL);
12502 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12503 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12505 for (; dyncon < dynconend; dyncon++)
12507 Elf_Internal_Dyn dyn;
12511 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12518 if (htab->vxworks_p
12519 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12520 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12525 goto get_vma_if_bpabi;
12528 goto get_vma_if_bpabi;
12531 goto get_vma_if_bpabi;
12533 name = ".gnu.version";
12534 goto get_vma_if_bpabi;
12536 name = ".gnu.version_d";
12537 goto get_vma_if_bpabi;
12539 name = ".gnu.version_r";
12540 goto get_vma_if_bpabi;
12546 name = RELOC_SECTION (htab, ".plt");
12548 s = bfd_get_section_by_name (output_bfd, name);
12549 BFD_ASSERT (s != NULL);
12550 if (!htab->symbian_p)
12551 dyn.d_un.d_ptr = s->vma;
12553 /* In the BPABI, tags in the PT_DYNAMIC section point
12554 at the file offset, not the memory address, for the
12555 convenience of the post linker. */
12556 dyn.d_un.d_ptr = s->filepos;
12557 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12561 if (htab->symbian_p)
12566 s = bfd_get_section_by_name (output_bfd,
12567 RELOC_SECTION (htab, ".plt"));
12568 BFD_ASSERT (s != NULL);
12569 dyn.d_un.d_val = s->size;
12570 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12575 if (!htab->symbian_p)
12577 /* My reading of the SVR4 ABI indicates that the
12578 procedure linkage table relocs (DT_JMPREL) should be
12579 included in the overall relocs (DT_REL). This is
12580 what Solaris does. However, UnixWare can not handle
12581 that case. Therefore, we override the DT_RELSZ entry
12582 here to make it not include the JMPREL relocs. Since
12583 the linker script arranges for .rel(a).plt to follow all
12584 other relocation sections, we don't have to worry
12585 about changing the DT_REL entry. */
12586 s = bfd_get_section_by_name (output_bfd,
12587 RELOC_SECTION (htab, ".plt"));
12589 dyn.d_un.d_val -= s->size;
12590 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12593 /* Fall through. */
12597 /* In the BPABI, the DT_REL tag must point at the file
12598 offset, not the VMA, of the first relocation
12599 section. So, we use code similar to that in
12600 elflink.c, but do not check for SHF_ALLOC on the
12601 relocation section, since relocation sections are
12602 never allocated under the BPABI. The comments above
12603 about UnixWare notwithstanding, we include all of the
12604 relocations here. */
12605 if (htab->symbian_p)
12608 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12609 ? SHT_REL : SHT_RELA);
12610 dyn.d_un.d_val = 0;
12611 for (i = 1; i < elf_numsections (output_bfd); i++)
12613 Elf_Internal_Shdr *hdr
12614 = elf_elfsections (output_bfd)[i];
12615 if (hdr->sh_type == type)
12617 if (dyn.d_tag == DT_RELSZ
12618 || dyn.d_tag == DT_RELASZ)
12619 dyn.d_un.d_val += hdr->sh_size;
12620 else if ((ufile_ptr) hdr->sh_offset
12621 <= dyn.d_un.d_val - 1)
12622 dyn.d_un.d_val = hdr->sh_offset;
12625 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12629 /* Set the bottom bit of DT_INIT/FINI if the
12630 corresponding function is Thumb. */
12632 name = info->init_function;
12635 name = info->fini_function;
12637 /* If it wasn't set by elf_bfd_final_link
12638 then there is nothing to adjust. */
12639 if (dyn.d_un.d_val != 0)
12641 struct elf_link_hash_entry * eh;
12643 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12644 FALSE, FALSE, TRUE);
12646 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12648 dyn.d_un.d_val |= 1;
12649 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12656 /* Fill in the first entry in the procedure linkage table. */
12657 if (splt->size > 0 && htab->plt_header_size)
12659 const bfd_vma *plt0_entry;
12660 bfd_vma got_address, plt_address, got_displacement;
12662 /* Calculate the addresses of the GOT and PLT. */
12663 got_address = sgot->output_section->vma + sgot->output_offset;
12664 plt_address = splt->output_section->vma + splt->output_offset;
12666 if (htab->vxworks_p)
12668 /* The VxWorks GOT is relocated by the dynamic linker.
12669 Therefore, we must emit relocations rather than simply
12670 computing the values now. */
12671 Elf_Internal_Rela rel;
12673 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12674 put_arm_insn (htab, output_bfd, plt0_entry[0],
12675 splt->contents + 0);
12676 put_arm_insn (htab, output_bfd, plt0_entry[1],
12677 splt->contents + 4);
12678 put_arm_insn (htab, output_bfd, plt0_entry[2],
12679 splt->contents + 8);
12680 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12682 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12683 rel.r_offset = plt_address + 12;
12684 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12686 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12687 htab->srelplt2->contents);
12691 got_displacement = got_address - (plt_address + 16);
12693 plt0_entry = elf32_arm_plt0_entry;
12694 put_arm_insn (htab, output_bfd, plt0_entry[0],
12695 splt->contents + 0);
12696 put_arm_insn (htab, output_bfd, plt0_entry[1],
12697 splt->contents + 4);
12698 put_arm_insn (htab, output_bfd, plt0_entry[2],
12699 splt->contents + 8);
12700 put_arm_insn (htab, output_bfd, plt0_entry[3],
12701 splt->contents + 12);
12703 #ifdef FOUR_WORD_PLT
12704 /* The displacement value goes in the otherwise-unused
12705 last word of the second entry. */
12706 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12708 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12713 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12714 really seem like the right value. */
12715 if (splt->output_section->owner == output_bfd)
12716 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12718 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12720 /* Correct the .rel(a).plt.unloaded relocations. They will have
12721 incorrect symbol indexes. */
12725 num_plts = ((htab->splt->size - htab->plt_header_size)
12726 / htab->plt_entry_size);
12727 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12729 for (; num_plts; num_plts--)
12731 Elf_Internal_Rela rel;
12733 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12734 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12735 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12736 p += RELOC_SIZE (htab);
12738 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12739 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12740 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12741 p += RELOC_SIZE (htab);
12746 /* Fill in the first three entries in the global offset table. */
12749 if (sgot->size > 0)
12752 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12754 bfd_put_32 (output_bfd,
12755 sdyn->output_section->vma + sdyn->output_offset,
12757 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12758 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12761 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12768 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12770 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12771 struct elf32_arm_link_hash_table *globals;
12773 i_ehdrp = elf_elfheader (abfd);
12775 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12776 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12778 i_ehdrp->e_ident[EI_OSABI] = 0;
12779 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12783 globals = elf32_arm_hash_table (link_info);
12784 if (globals != NULL && globals->byteswap_code)
12785 i_ehdrp->e_flags |= EF_ARM_BE8;
12789 static enum elf_reloc_type_class
12790 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12792 switch ((int) ELF32_R_TYPE (rela->r_info))
12794 case R_ARM_RELATIVE:
12795 return reloc_class_relative;
12796 case R_ARM_JUMP_SLOT:
12797 return reloc_class_plt;
12799 return reloc_class_copy;
12801 return reloc_class_normal;
12805 /* Set the right machine number for an Arm ELF file. */
12808 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12810 if (hdr->sh_type == SHT_NOTE)
12811 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12817 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12819 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12822 /* Return TRUE if this is an unwinding table entry. */
12825 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12827 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12828 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12832 /* Set the type and flags for an ARM section. We do this by
12833 the section name, which is a hack, but ought to work. */
12836 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12840 name = bfd_get_section_name (abfd, sec);
12842 if (is_arm_elf_unwind_section_name (abfd, name))
12844 hdr->sh_type = SHT_ARM_EXIDX;
12845 hdr->sh_flags |= SHF_LINK_ORDER;
12850 /* Handle an ARM specific section when reading an object file. This is
12851 called when bfd_section_from_shdr finds a section with an unknown type.  */
12855 elf32_arm_section_from_shdr (bfd *abfd,
12856 Elf_Internal_Shdr * hdr,
12860 /* There ought to be a place to keep ELF backend specific flags, but
12861 at the moment there isn't one. We just keep track of the
12862 sections by their name, instead. Fortunately, the ABI gives
12863 names for all the ARM specific sections, so we will probably get away with it.  */
12865 switch (hdr->sh_type)
12867 case SHT_ARM_EXIDX:
12868 case SHT_ARM_PREEMPTMAP:
12869 case SHT_ARM_ATTRIBUTES:
12876 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12882 static _arm_elf_section_data *
12883 get_arm_elf_section_data (asection * sec)
12885 if (sec && sec->owner && is_arm_elf (sec->owner))
12886 return elf32_arm_section_data (sec);
12894 struct bfd_link_info *info;
12897 int (*func) (void *, const char *, Elf_Internal_Sym *,
12898 asection *, struct elf_link_hash_entry *);
12899 } output_arch_syminfo;
12901 enum map_symbol_type
12909 /* Output a single mapping symbol. */
12912 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12913 enum map_symbol_type type,
12916 static const char *names[3] = {"$a", "$t", "$d"};
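/* Per the ARM ELF specification, "$a" marks the start of a sequence of ARM
   instructions, "$t" the start of Thumb instructions and "$d" the start of
   literal data; the array indices correspond to the map_symbol_type values
   used below.  */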
12917 Elf_Internal_Sym sym;
12919 sym.st_value = osi->sec->output_section->vma
12920 + osi->sec->output_offset
12924 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12925 sym.st_shndx = osi->sec_shndx;
12926 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
12927 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12931 /* Output mapping symbols for PLT entries associated with H. */
12934 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12936 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12937 struct elf32_arm_link_hash_table *htab;
12938 struct elf32_arm_link_hash_entry *eh;
12941 if (h->root.type == bfd_link_hash_indirect)
12944 if (h->root.type == bfd_link_hash_warning)
12945 /* When warning symbols are created, they **replace** the "real"
12946 entry in the hash table, thus we never get to see the real
12947 symbol in a hash traversal. So look at it now. */
12948 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12950 if (h->plt.offset == (bfd_vma) -1)
12953 htab = elf32_arm_hash_table (osi->info);
12957 eh = (struct elf32_arm_link_hash_entry *) h;
12958 addr = h->plt.offset;
12959 if (htab->symbian_p)
12961 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12963 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12966 else if (htab->vxworks_p)
12968 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12970 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12972 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12974 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12979 bfd_signed_vma thumb_refs;
12981 thumb_refs = eh->plt_thumb_refcount;
12982 if (!htab->use_blx)
12983 thumb_refs += eh->plt_maybe_thumb_refcount;
12985 if (thumb_refs > 0)
12987 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12990 #ifdef FOUR_WORD_PLT
12991 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12993 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12996 /* A three-word PLT with no Thumb thunk contains only Arm code,
12997 so we only need to output a mapping symbol for the first PLT entry and
12998 entries with Thumb thunks.  */
12999 if (thumb_refs > 0 || addr == 20)
13001 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
13010 /* Output a single local symbol for a generated stub. */
13013 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
13014 bfd_vma offset, bfd_vma size)
13016 Elf_Internal_Sym sym;
13018 sym.st_value = osi->sec->output_section->vma
13019 + osi->sec->output_offset
13021 sym.st_size = size;
13023 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13024 sym.st_shndx = osi->sec_shndx;
13025 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
13029 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
13032 struct elf32_arm_stub_hash_entry *stub_entry;
13033 asection *stub_sec;
13036 output_arch_syminfo *osi;
13037 const insn_sequence *template_sequence;
13038 enum stub_insn_type prev_type;
13041 enum map_symbol_type sym_type;
13043 /* Massage our args to the form they really have. */
13044 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13045 osi = (output_arch_syminfo *) in_arg;
13047 stub_sec = stub_entry->stub_sec;
13049 /* Ensure this stub is attached to the current section being processed.  */
13051 if (stub_sec != osi->sec)
13054 addr = (bfd_vma) stub_entry->stub_offset;
13055 stub_name = stub_entry->output_name;
13057 template_sequence = stub_entry->stub_template;
13058 switch (template_sequence[0].type)
13061 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
13066 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
13067 stub_entry->stub_size))
13075 prev_type = DATA_TYPE;
13077 for (i = 0; i < stub_entry->stub_template_size; i++)
13079 switch (template_sequence[i].type)
13082 sym_type = ARM_MAP_ARM;
13087 sym_type = ARM_MAP_THUMB;
13091 sym_type = ARM_MAP_DATA;
13099 if (template_sequence[i].type != prev_type)
13101 prev_type = template_sequence[i].type;
13102 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
13106 switch (template_sequence[i].type)
13130 /* Output mapping symbols for linker generated sections,
13131 and for those data-only sections that do not have a mapping symbol.  */
13135 elf32_arm_output_arch_local_syms (bfd *output_bfd,
13136 struct bfd_link_info *info,
13138 int (*func) (void *, const char *,
13139 Elf_Internal_Sym *,
13141 struct elf_link_hash_entry *))
13143 output_arch_syminfo osi;
13144 struct elf32_arm_link_hash_table *htab;
13146 bfd_size_type size;
13149 htab = elf32_arm_hash_table (info);
13153 check_use_blx (htab);
13159 /* Add a $d mapping symbol to data-only sections that
13160 don't have any mapping symbol. This may result in (harmless) redundant
13161 mapping symbols. */
13162 for (input_bfd = info->input_bfds;
13164 input_bfd = input_bfd->link_next)
13166 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
13167 for (osi.sec = input_bfd->sections;
13169 osi.sec = osi.sec->next)
13171 if (osi.sec->output_section != NULL
13172 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
13174 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
13175 == SEC_HAS_CONTENTS
13176 && get_arm_elf_section_data (osi.sec) != NULL
13177 && get_arm_elf_section_data (osi.sec)->mapcount == 0
13178 && osi.sec->size > 0)
13180 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13181 (output_bfd, osi.sec->output_section);
13182 if (osi.sec_shndx != (int)SHN_BAD)
13183 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
13188 /* ARM->Thumb glue. */
13189 if (htab->arm_glue_size > 0)
13191 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13192 ARM2THUMB_GLUE_SECTION_NAME);
13194 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13195 (output_bfd, osi.sec->output_section);
13196 if (info->shared || htab->root.is_relocatable_executable
13197 || htab->pic_veneer)
13198 size = ARM2THUMB_PIC_GLUE_SIZE;
13199 else if (htab->use_blx)
13200 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13202 size = ARM2THUMB_STATIC_GLUE_SIZE;
13204 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13206 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13207 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13211 /* Thumb->ARM glue. */
13212 if (htab->thumb_glue_size > 0)
13214 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13215 THUMB2ARM_GLUE_SECTION_NAME);
13217 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13218 (output_bfd, osi.sec->output_section);
13219 size = THUMB2ARM_GLUE_SIZE;
13221 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13223 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13224 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13228 /* ARMv4 BX veneers. */
13229 if (htab->bx_glue_size > 0)
13231 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13232 ARM_BX_GLUE_SECTION_NAME);
13234 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13235 (output_bfd, osi.sec->output_section);
13237 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13240 /* Long call stubs.  */
13241 if (htab->stub_bfd && htab->stub_bfd->sections)
13243 asection* stub_sec;
13245 for (stub_sec = htab->stub_bfd->sections;
13247 stub_sec = stub_sec->next)
13249 /* Ignore non-stub sections. */
13250 if (!strstr (stub_sec->name, STUB_SUFFIX))
13253 osi.sec = stub_sec;
13255 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13256 (output_bfd, osi.sec->output_section);
13258 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13262 /* Finally, output mapping symbols for the PLT. */
13263 if (!htab->splt || htab->splt->size == 0)
13266 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13267 htab->splt->output_section);
13268 osi.sec = htab->splt;
13269 /* Output mapping symbols for the plt header.  SymbianOS does not have a plt header.  */
13271 if (htab->vxworks_p)
13273 /* VxWorks shared libraries have no PLT header. */
13276 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13278 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13282 else if (!htab->symbian_p)
13284 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13286 #ifndef FOUR_WORD_PLT
13287 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13292 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13296 /* Allocate target specific section data. */
13299 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13301 if (!sec->used_by_bfd)
13303 _arm_elf_section_data *sdata;
13304 bfd_size_type amt = sizeof (*sdata);
13306 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13309 sec->used_by_bfd = sdata;
13312 return _bfd_elf_new_section_hook (abfd, sec);
13316 /* Used to order a list of mapping symbols by address. */
13319 elf32_arm_compare_mapping (const void * a, const void * b)
13321 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13322 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13324 if (amap->vma > bmap->vma)
13326 else if (amap->vma < bmap->vma)
13328 else if (amap->type > bmap->type)
13329 /* Ensure results do not depend on the host qsort for objects with
13330 multiple mapping symbols at the same address by sorting on type after vma.  */
13333 else if (amap->type < bmap->type)
13339 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13341 static unsigned long
13342 offset_prel31 (unsigned long addr, bfd_vma offset)
13344 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
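/* Worked example: offset_prel31 (0x7ffffff0, 0x20) gives 0x00000010 -- the
   31-bit field wraps around while bit 31 is taken unchanged from ADDR.  */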
13347 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31 offsets.  */
13351 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13353 unsigned long first_word = bfd_get_32 (output_bfd, from);
13354 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13356 /* High bit of first word is supposed to be zero. */
13357 if ((first_word & 0x80000000ul) == 0)
13358 first_word = offset_prel31 (first_word, offset);
13360 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
13361 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
13362 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13363 second_word = offset_prel31 (second_word, offset);
13365 bfd_put_32 (output_bfd, first_word, to);
13366 bfd_put_32 (output_bfd, second_word, to + 4);
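/* For reference (a summary of the ARM EHABI layout): each .ARM.exidx entry
   is two words.  The first is always a prel31 offset to the function it
   covers.  The second is either 0x1 (EXIDX_CANTUNWIND), an inline unwind
   description with bit 31 set, or a prel31 offset into .ARM.extab with bit
   31 clear -- only that last form needs rebasing, which is what the test
   above implements.  */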
13369 /* Data for make_branch_to_a8_stub(). */
13371 struct a8_branch_to_stub_data {
13372 asection *writing_section;
13373 bfd_byte *contents;
13377 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13378 places for a particular section. */
13381 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13384 struct elf32_arm_stub_hash_entry *stub_entry;
13385 struct a8_branch_to_stub_data *data;
13386 bfd_byte *contents;
13387 unsigned long branch_insn;
13388 bfd_vma veneered_insn_loc, veneer_entry_loc;
13389 bfd_signed_vma branch_offset;
13391 unsigned int target;
13393 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13394 data = (struct a8_branch_to_stub_data *) in_arg;
13396 if (stub_entry->target_section != data->writing_section
13397 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
13400 contents = data->contents;
13402 veneered_insn_loc = stub_entry->target_section->output_section->vma
13403 + stub_entry->target_section->output_offset
13404 + stub_entry->target_value;
13406 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13407 + stub_entry->stub_sec->output_offset
13408 + stub_entry->stub_offset;
13410 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13411 veneered_insn_loc &= ~3u;
13413 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13415 abfd = stub_entry->target_section->owner;
13416 target = stub_entry->target_value;
13418 /* We attempt to avoid this condition by setting stubs_always_after_branch
13419 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13420 This check is just to be on the safe side... */
13421 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13423 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13424 "allocated in unsafe location"), abfd);
13428 switch (stub_entry->stub_type)
13430 case arm_stub_a8_veneer_b:
13431 case arm_stub_a8_veneer_b_cond:
13432 branch_insn = 0xf0009000;
13435 case arm_stub_a8_veneer_blx:
13436 branch_insn = 0xf000e800;
13439 case arm_stub_a8_veneer_bl:
13441 unsigned int i1, j1, i2, j2, s;
13443 branch_insn = 0xf000d000;
13446 if (branch_offset < -16777216 || branch_offset > 16777214)
13448 /* There's not much we can do apart from complain if this happens. */
13450 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13451 "of range (input file too large)"), abfd);
13455 /* i1 = not(j1 eor s), so:
13457 j1 = (not i1) eor s. */
13459 branch_insn |= (branch_offset >> 1) & 0x7ff;
13460 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13461 i2 = (branch_offset >> 22) & 1;
13462 i1 = (branch_offset >> 23) & 1;
13463 s = (branch_offset >> 24) & 1;
13466 branch_insn |= j2 << 11;
13467 branch_insn |= j1 << 13;
13468 branch_insn |= s << 26;
13477 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
13478 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
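
/* Illustrative sketch only, not used by the linker: how the Thumb-2 BL
   immediate fields (S, J1, J2, imm10, imm11) used by the
   arm_stub_a8_veneer_bl case above fit together, including the J1/J2
   derivation described in the comment.  The helper name is
   hypothetical.  */

static unsigned long ATTRIBUTE_UNUSED
a8_stub_t2_bl_example (bfd_signed_vma branch_offset)
{
  unsigned long insn = 0xf000d000;	/* BL opcode skeleton.  */
  unsigned int i1, i2, j1, j2, s;

  insn |= (branch_offset >> 1) & 0x7ff;		 /* imm11.  */
  insn |= ((branch_offset >> 12) & 0x3ff) << 16; /* imm10.  */
  i2 = (branch_offset >> 22) & 1;
  i1 = (branch_offset >> 23) & 1;
  s  = (branch_offset >> 24) & 1;
  j1 = s ^ (i1 ^ 1);		/* j1 = (not i1) eor s.  */
  j2 = s ^ (i2 ^ 1);		/* j2 = (not i2) eor s.  */
  insn |= (j2 << 11) | (j1 << 13) | (s << 26);

  /* For example, an offset of -4 (a branch to itself) encodes as
     0xf7fffffe, the canonical "bl ." instruction.  */
  return insn;
}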
13483 /* Do code byteswapping. Return FALSE afterwards so that the section is
13484 written out as normal. */
13487 elf32_arm_write_section (bfd *output_bfd,
13488 struct bfd_link_info *link_info,
13490 bfd_byte *contents)
13492 unsigned int mapcount, errcount;
13493 _arm_elf_section_data *arm_data;
13494 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13495 elf32_arm_section_map *map;
13496 elf32_vfp11_erratum_list *errnode;
13499 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13503 if (globals == NULL)
13506 /* If this section has not been allocated an _arm_elf_section_data
13507 structure then we cannot record anything. */
13508 arm_data = get_arm_elf_section_data (sec);
13509 if (arm_data == NULL)
13512 mapcount = arm_data->mapcount;
13513 map = arm_data->map;
13514 errcount = arm_data->erratumcount;
13518 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13520 for (errnode = arm_data->erratumlist; errnode != 0;
13521 errnode = errnode->next)
13523 bfd_vma target = errnode->vma - offset;
13525 switch (errnode->type)
13527 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13529 bfd_vma branch_to_veneer;
13530 /* Original condition code of instruction, plus bit mask for
13531 ARM B instruction. */
13532 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13535 /* The instruction is before the label. */
13538 /* Above offset included in -4 below. */
13539 branch_to_veneer = errnode->u.b.veneer->vma
13540 - errnode->vma - 4;
13542 if ((signed) branch_to_veneer < -(1 << 25)
13543 || (signed) branch_to_veneer >= (1 << 25))
13544 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13545 "range"), output_bfd);
13547 insn |= (branch_to_veneer >> 2) & 0xffffff;
13548 contents[endianflip ^ target] = insn & 0xff;
13549 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13550 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13551 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13555 case VFP11_ERRATUM_ARM_VENEER:
13557 bfd_vma branch_from_veneer;
13560 /* Take size of veneer into account. */
13561 branch_from_veneer = errnode->u.v.branch->vma
13562 - errnode->vma - 12;
13564 if ((signed) branch_from_veneer < -(1 << 25)
13565 || (signed) branch_from_veneer >= (1 << 25))
13566 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13567 "range"), output_bfd);
13569 /* Original instruction. */
13570 insn = errnode->u.v.branch->u.b.vfp_insn;
13571 contents[endianflip ^ target] = insn & 0xff;
13572 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13573 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13574 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13576 /* Branch back to insn after original insn. */
13577 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13578 contents[endianflip ^ (target + 4)] = insn & 0xff;
13579 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
13580 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
13581 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
13591 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13593 arm_unwind_table_edit *edit_node
13594 = arm_data->u.exidx.unwind_edit_list;
13595 /* Now, sec->size is the size of the section we will write. The original
13596 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13597 markers) was sec->rawsize. (This isn't the case if we perform no
13598 edits; then rawsize will be zero and we should use size.) */
13599 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
13600 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13601 unsigned int in_index, out_index;
13602 bfd_vma add_to_offsets = 0;
13604 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13608 unsigned int edit_index = edit_node->index;
13610 if (in_index < edit_index && in_index * 8 < input_size)
13612 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13613 contents + in_index * 8, add_to_offsets);
13617 else if (in_index == edit_index
13618 || (in_index * 8 >= input_size
13619 && edit_index == UINT_MAX))
13621 switch (edit_node->type)
13623 case DELETE_EXIDX_ENTRY:
13625 add_to_offsets += 8;
13628 case INSERT_EXIDX_CANTUNWIND_AT_END:
13630 asection *text_sec = edit_node->linked_section;
13631 bfd_vma text_offset = text_sec->output_section->vma
13632 + text_sec->output_offset
13634 bfd_vma exidx_offset = offset + out_index * 8;
13635 unsigned long prel31_offset;
13637 /* Note: this is meant to be equivalent to an
13638 R_ARM_PREL31 relocation. These synthetic
13639 EXIDX_CANTUNWIND markers are not relocated by the
13640 usual BFD method. */
13641 prel31_offset = (text_offset - exidx_offset)
13644 /* First address we can't unwind. */
13645 bfd_put_32 (output_bfd, prel31_offset,
13646 &edited_contents[out_index * 8]);
13648 /* Code for EXIDX_CANTUNWIND. */
13649 bfd_put_32 (output_bfd, 0x1,
13650 &edited_contents[out_index * 8 + 4]);
13653 add_to_offsets -= 8;
13658 edit_node = edit_node->next;
13663 /* No more edits, copy remaining entries verbatim. */
13664 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13665 contents + in_index * 8, add_to_offsets);
13671 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13672 bfd_set_section_contents (output_bfd, sec->output_section,
13674 (file_ptr) sec->output_offset, sec->size);
13679 /* Fix code to point to Cortex-A8 erratum stubs. */
13680 if (globals->fix_cortex_a8)
13682 struct a8_branch_to_stub_data data;
13684 data.writing_section = sec;
13685 data.contents = contents;
13687 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13694 if (globals->byteswap_code)
13696 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13699 for (i = 0; i < mapcount; i++)
13701 if (i == mapcount - 1)
13704 end = map[i + 1].vma;
13706 switch (map[i].type)
13709 /* Byte swap code words. */
13710 while (ptr + 3 < end)
13712 tmp = contents[ptr];
13713 contents[ptr] = contents[ptr + 3];
13714 contents[ptr + 3] = tmp;
13715 tmp = contents[ptr + 1];
13716 contents[ptr + 1] = contents[ptr + 2];
13717 contents[ptr + 2] = tmp;
13723 /* Byte swap code halfwords. */
13724 while (ptr + 1 < end)
13726 tmp = contents[ptr];
13727 contents[ptr] = contents[ptr + 1];
13728 contents[ptr + 1] = tmp;
13734 /* Leave data alone. */
13742 arm_data->mapcount = -1;
13743 arm_data->mapsize = 0;
13744 arm_data->map = NULL;
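
/* Illustrative sketch only, not used by the linker: the shape of the
   synthetic EXIDX_CANTUNWIND entry emitted above, with the prel31
   masking written out explicitly.  The helper name and the addresses
   below are hypothetical.  */

static void ATTRIBUTE_UNUSED
cantunwind_entry_example (bfd *output_bfd, bfd_byte *entry)
{
  bfd_vma text_offset = 0x8100;	 /* First address we cannot unwind.  */
  bfd_vma exidx_offset = 0x9000; /* Where this entry will be placed.  */

  /* Word 0: prel31 offset from the entry to the text address, as an
     R_ARM_PREL31 relocation would compute it.  */
  bfd_put_32 (output_bfd, (text_offset - exidx_offset) & 0x7ffffffful,
	      entry);
  /* Word 1: the EXIDX_CANTUNWIND code.  */
  bfd_put_32 (output_bfd, 0x1, entry + 4);
}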
13749 /* Display STT_ARM_TFUNC symbols as functions. */
13752 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13755 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13757 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13758 elfsym->symbol.flags |= BSF_FUNCTION;
13762 /* Mangle thumb function symbols as we read them in. */
13765 elf32_arm_swap_symbol_in (bfd * abfd,
13768 Elf_Internal_Sym *dst)
13770 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13773 /* New EABI objects mark thumb function symbols by setting the low bit of
13774 the address. Turn these into STT_ARM_TFUNC. */
13775 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13776 && (dst->st_value & 1))
13778 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13779 dst->st_value &= ~(bfd_vma) 1;
13785 /* Mangle thumb function symbols as we write them out. */
13788 elf32_arm_swap_symbol_out (bfd *abfd,
13789 const Elf_Internal_Sym *src,
13793 Elf_Internal_Sym newsym;
13795 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13796 of the address set, as per the new EABI. We do this unconditionally
13797 because objcopy does not set the elf header flags until after
13798 it writes out the symbol table. */
13799 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13802 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13803 if (newsym.st_shndx != SHN_UNDEF)
13805 /* Do this only for defined symbols. At link time, the static
13806 linker will simulate the work of the dynamic linker in resolving
13807 symbols and will carry over the thumbness of found symbols to
13808 the output symbol table. It's not clear how it happens, but
13809 the thumbness of undefined symbols can well be different at
13810 runtime, and writing '1' for them would be confusing for users
13811 and possibly for the dynamic linker itself. */
13813 newsym.st_value |= 1;
13818 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
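
/* Illustrative sketch only, not used by the linker: the Thumb bit
   round-trip performed by the two swap routines above.  The helper
   name and the symbol below are hypothetical.  */

static void ATTRIBUTE_UNUSED
thumb_symbol_roundtrip_example (void)
{
  Elf_Internal_Sym sym;

  /* As found in a new EABI object: STT_FUNC at an odd address.  */
  sym.st_info = ELF_ST_INFO (STB_GLOBAL, STT_FUNC);
  sym.st_value = 0x8001;

  /* Reading direction (elf32_arm_swap_symbol_in).  */
  if (ELF_ST_TYPE (sym.st_info) == STT_FUNC && (sym.st_value & 1))
    {
      sym.st_info = ELF_ST_INFO (ELF_ST_BIND (sym.st_info), STT_ARM_TFUNC);
      sym.st_value &= ~(bfd_vma) 1;
    }
  BFD_ASSERT (ELF_ST_TYPE (sym.st_info) == STT_ARM_TFUNC
	      && sym.st_value == 0x8000);

  /* The writing direction (elf32_arm_swap_symbol_out) reverses this
     for defined symbols: the type goes back to STT_FUNC and the low
     address bit is set again.  */
}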
13821 /* Add the PT_ARM_EXIDX program header. */
13824 elf32_arm_modify_segment_map (bfd *abfd,
13825 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13827 struct elf_segment_map *m;
13830 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13831 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13833 /* If there is already a PT_ARM_EXIDX header, then we do not
13834 want to add another one. This situation arises when running
13835 "strip"; the input binary already has the header. */
13836 m = elf_tdata (abfd)->segment_map;
13837 while (m && m->p_type != PT_ARM_EXIDX)
13841 m = (struct elf_segment_map *)
13842 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13845 m->p_type = PT_ARM_EXIDX;
13847 m->sections[0] = sec;
13849 m->next = elf_tdata (abfd)->segment_map;
13850 elf_tdata (abfd)->segment_map = m;
13857 /* We may add a PT_ARM_EXIDX program header. */
13860 elf32_arm_additional_program_headers (bfd *abfd,
13861 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13865 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13866 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13872 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13875 elf32_arm_is_function_type (unsigned int type)
13877 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13880 /* We use this to override swap_symbol_in and swap_symbol_out. */
13881 const struct elf_size_info elf32_arm_size_info =
13883 sizeof (Elf32_External_Ehdr),
13884 sizeof (Elf32_External_Phdr),
13885 sizeof (Elf32_External_Shdr),
13886 sizeof (Elf32_External_Rel),
13887 sizeof (Elf32_External_Rela),
13888 sizeof (Elf32_External_Sym),
13889 sizeof (Elf32_External_Dyn),
13890 sizeof (Elf_External_Note),
13894 ELFCLASS32, EV_CURRENT,
13895 bfd_elf32_write_out_phdrs,
13896 bfd_elf32_write_shdrs_and_ehdr,
13897 bfd_elf32_checksum_contents,
13898 bfd_elf32_write_relocs,
13899 elf32_arm_swap_symbol_in,
13900 elf32_arm_swap_symbol_out,
13901 bfd_elf32_slurp_reloc_table,
13902 bfd_elf32_slurp_symbol_table,
13903 bfd_elf32_swap_dyn_in,
13904 bfd_elf32_swap_dyn_out,
13905 bfd_elf32_swap_reloc_in,
13906 bfd_elf32_swap_reloc_out,
13907 bfd_elf32_swap_reloca_in,
13908 bfd_elf32_swap_reloca_out
13911 #define ELF_ARCH bfd_arch_arm
13912 #define ELF_TARGET_ID ARM_ELF_DATA
13913 #define ELF_MACHINE_CODE EM_ARM
13914 #ifdef __QNXTARGET__
13915 #define ELF_MAXPAGESIZE 0x1000
13917 #define ELF_MAXPAGESIZE 0x8000
13919 #define ELF_MINPAGESIZE 0x1000
13920 #define ELF_COMMONPAGESIZE 0x1000
13922 #define bfd_elf32_mkobject elf32_arm_mkobject
13924 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13925 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13926 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13927 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13928 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13929 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13930 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13931 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13932 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13933 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13934 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13935 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13936 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13938 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13939 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13940 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13941 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13942 #define elf_backend_check_relocs elf32_arm_check_relocs
13943 #define elf_backend_relocate_section elf32_arm_relocate_section
13944 #define elf_backend_write_section elf32_arm_write_section
13945 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13946 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13947 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13948 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13949 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13950 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13951 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13952 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13953 #define elf_backend_object_p elf32_arm_object_p
13954 #define elf_backend_section_flags elf32_arm_section_flags
13955 #define elf_backend_fake_sections elf32_arm_fake_sections
13956 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13957 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13958 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13959 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13960 #define elf_backend_size_info elf32_arm_size_info
13961 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13962 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13963 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13964 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13965 #define elf_backend_is_function_type elf32_arm_is_function_type
13967 #define elf_backend_can_refcount 1
13968 #define elf_backend_can_gc_sections 1
13969 #define elf_backend_plt_readonly 1
13970 #define elf_backend_want_got_plt 1
13971 #define elf_backend_want_plt_sym 0
13972 #define elf_backend_may_use_rel_p 1
13973 #define elf_backend_may_use_rela_p 0
13974 #define elf_backend_default_use_rela_p 0
13976 #define elf_backend_got_header_size 12
13978 #undef elf_backend_obj_attrs_vendor
13979 #define elf_backend_obj_attrs_vendor "aeabi"
13980 #undef elf_backend_obj_attrs_section
13981 #define elf_backend_obj_attrs_section ".ARM.attributes"
13982 #undef elf_backend_obj_attrs_arg_type
13983 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13984 #undef elf_backend_obj_attrs_section_type
13985 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13986 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13988 #include "elf32-target.h"
13990 /* VxWorks Targets. */
13992 #undef TARGET_LITTLE_SYM
13993 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13994 #undef TARGET_LITTLE_NAME
13995 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13996 #undef TARGET_BIG_SYM
13997 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13998 #undef TARGET_BIG_NAME
13999 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
14001 /* Like elf32_arm_link_hash_table_create -- but overrides
14002 appropriately for VxWorks. */
14004 static struct bfd_link_hash_table *
14005 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
14007 struct bfd_link_hash_table *ret;
14009 ret = elf32_arm_link_hash_table_create (abfd);
14012 struct elf32_arm_link_hash_table *htab
14013 = (struct elf32_arm_link_hash_table *) ret;
14015 htab->vxworks_p = 1;
14021 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
14023 elf32_arm_final_write_processing (abfd, linker);
14024 elf_vxworks_final_write_processing (abfd, linker);
14028 #define elf32_bed elf32_arm_vxworks_bed
14030 #undef bfd_elf32_bfd_link_hash_table_create
14031 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
14032 #undef elf_backend_add_symbol_hook
14033 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
14034 #undef elf_backend_final_write_processing
14035 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
14036 #undef elf_backend_emit_relocs
14037 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
14039 #undef elf_backend_may_use_rel_p
14040 #define elf_backend_may_use_rel_p 0
14041 #undef elf_backend_may_use_rela_p
14042 #define elf_backend_may_use_rela_p 1
14043 #undef elf_backend_default_use_rela_p
14044 #define elf_backend_default_use_rela_p 1
14045 #undef elf_backend_want_plt_sym
14046 #define elf_backend_want_plt_sym 1
14047 #undef ELF_MAXPAGESIZE
14048 #define ELF_MAXPAGESIZE 0x1000
14050 #include "elf32-target.h"
14053 /* Merge backend specific data from an object file to the output
14054 object file when linking. */
14057 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
14059 flagword out_flags;
14061 bfd_boolean flags_compatible = TRUE;
14064 /* Check if we have the same endianness. */
14065 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
14068 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
14071 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
14074 /* The input BFD must have had its flags initialised. */
14075 /* The following seems bogus to me -- The flags are initialized in
14076 the assembler but I don't think an elf_flags_init field is
14077 written into the object. */
14078 /* BFD_ASSERT (elf_flags_init (ibfd)); */
14080 in_flags = elf_elfheader (ibfd)->e_flags;
14081 out_flags = elf_elfheader (obfd)->e_flags;
14083 /* In theory there is no reason why we couldn't handle this. However
14084 in practice it isn't even close to working and there is no real
14085 reason to want it. */
14086 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
14087 && !(ibfd->flags & DYNAMIC)
14088 && (in_flags & EF_ARM_BE8))
14090 _bfd_error_handler (_("error: %B is already in final BE8 format"),
14095 if (!elf_flags_init (obfd))
14097 /* If the input is the default architecture and had the default
14098 flags then do not bother setting the flags for the output
14099 architecture; instead allow future merges to do this. If no
14100 future merges ever set these flags then they will retain their
14101 uninitialised values, which, surprise surprise, correspond
14102 to the default values. */
14103 if (bfd_get_arch_info (ibfd)->the_default
14104 && elf_elfheader (ibfd)->e_flags == 0)
14107 elf_flags_init (obfd) = TRUE;
14108 elf_elfheader (obfd)->e_flags = in_flags;
14110 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
14111 && bfd_get_arch_info (obfd)->the_default)
14112 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
14117 /* Determine what should happen if the input ARM architecture
14118 does not match the output ARM architecture. */
14119 if (! bfd_arm_merge_machines (ibfd, obfd))
14122 /* Identical flags must be compatible. */
14123 if (in_flags == out_flags)
14126 /* Check to see if the input BFD actually contains any sections. If
14127 not, its flags may not have been initialised either, but it
14128 cannot actually cause any incompatibility. Do not short-circuit
14129 dynamic objects; their section list may be emptied by
14130 elf_link_add_object_symbols.
14132 Also check to see if there are no code sections in the input.
14133 In this case there is no need to check for code-specific flags.
14134 XXX - do we need to worry about floating-point format compatibility
14135 in data sections? */
14136 if (!(ibfd->flags & DYNAMIC))
14138 bfd_boolean null_input_bfd = TRUE;
14139 bfd_boolean only_data_sections = TRUE;
14141 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
14143 /* Ignore synthetic glue sections. */
14144 if (strcmp (sec->name, ".glue_7")
14145 && strcmp (sec->name, ".glue_7t"))
14147 if ((bfd_get_section_flags (ibfd, sec)
14148 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14149 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14150 only_data_sections = FALSE;
14152 null_input_bfd = FALSE;
14157 if (null_input_bfd || only_data_sections)
14161 /* Complain about various flag mismatches. */
14162 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
14163 EF_ARM_EABI_VERSION (out_flags)))
14166 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
14168 (in_flags & EF_ARM_EABIMASK) >> 24,
14169 (out_flags & EF_ARM_EABIMASK) >> 24);
14173 /* Not sure what needs to be checked for EABI versions >= 1. */
14174 /* VxWorks libraries do not use these flags. */
14175 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
14176 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
14177 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
14179 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
14182 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
14184 in_flags & EF_ARM_APCS_26 ? 26 : 32,
14185 out_flags & EF_ARM_APCS_26 ? 26 : 32);
14186 flags_compatible = FALSE;
14189 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
14191 if (in_flags & EF_ARM_APCS_FLOAT)
14193 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
14197 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
14200 flags_compatible = FALSE;
14203 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
14205 if (in_flags & EF_ARM_VFP_FLOAT)
14207 (_("error: %B uses VFP instructions, whereas %B does not"),
14211 (_("error: %B uses FPA instructions, whereas %B does not"),
14214 flags_compatible = FALSE;
14217 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
14219 if (in_flags & EF_ARM_MAVERICK_FLOAT)
14221 (_("error: %B uses Maverick instructions, whereas %B does not"),
14225 (_("error: %B does not use Maverick instructions, whereas %B does"),
14228 flags_compatible = FALSE;
14231 #ifdef EF_ARM_SOFT_FLOAT
14232 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
14234 /* We can allow interworking between code that uses the VFP format
14235 layout, and uses either soft float or integer regs for
14236 passing floating point arguments and results. We already
14237 know that the APCS_FLOAT flags match; similarly for the VFP flags. */
14239 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
14240 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
14242 if (in_flags & EF_ARM_SOFT_FLOAT)
14244 (_("error: %B uses software FP, whereas %B uses hardware FP"),
14248 (_("error: %B uses hardware FP, whereas %B uses software FP"),
14251 flags_compatible = FALSE;
14256 /* Interworking mismatch is only a warning. */
14257 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14259 if (in_flags & EF_ARM_INTERWORK)
14262 (_("Warning: %B supports interworking, whereas %B does not"),
14268 (_("Warning: %B does not support interworking, whereas %B does"),
14274 return flags_compatible;
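
/* Illustrative sketch only, not used by the linker: the EABI version
   lives in the top byte of e_flags, which is why the diagnostic above
   shifts by 24.  The helper name and the flag word below are
   hypothetical.  */

static void ATTRIBUTE_UNUSED
eabi_version_example (void)
{
  flagword flags = EF_ARM_EABI_VER5 | EF_ARM_BE8;

  BFD_ASSERT (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_VER5);
  BFD_ASSERT (((flags & EF_ARM_EABIMASK) >> 24) == 5);
}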
14278 /* Symbian OS Targets. */
14280 #undef TARGET_LITTLE_SYM
14281 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
14282 #undef TARGET_LITTLE_NAME
14283 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
14284 #undef TARGET_BIG_SYM
14285 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
14286 #undef TARGET_BIG_NAME
14287 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
14289 /* Like elf32_arm_link_hash_table_create -- but overrides
14290 appropriately for Symbian OS. */
14292 static struct bfd_link_hash_table *
14293 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
14295 struct bfd_link_hash_table *ret;
14297 ret = elf32_arm_link_hash_table_create (abfd);
14300 struct elf32_arm_link_hash_table *htab
14301 = (struct elf32_arm_link_hash_table *)ret;
14302 /* There is no PLT header for Symbian OS. */
14303 htab->plt_header_size = 0;
14304 /* The PLT entries are each one instruction and one word. */
14305 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
14306 htab->symbian_p = 1;
14307 /* Symbian uses armv5t or above, so use_blx is always true. */
14309 htab->root.is_relocatable_executable = 1;
14314 static const struct bfd_elf_special_section
14315 elf32_arm_symbian_special_sections[] =
14317 /* In a BPABI executable, the dynamic linking sections do not go in
14318 the loadable read-only segment. The post-linker may wish to
14319 refer to these sections, but they are not part of the final program image. */
14321 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
14322 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
14323 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
14324 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
14325 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
14326 /* These sections do not need to be writable as the SymbianOS
14327 postlinker will arrange things so that no dynamic relocation is applied to them. */
14329 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
14330 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
14331 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
14332 { NULL, 0, 0, 0, 0 }
14336 elf32_arm_symbian_begin_write_processing (bfd *abfd,
14337 struct bfd_link_info *link_info)
14339 /* BPABI objects are never loaded directly by an OS kernel; they are
14340 processed by a postlinker first, into an OS-specific format. If
14341 the D_PAGED bit is set on the file, BFD will align segments on
14342 page boundaries, so that an OS can directly map the file. With
14343 BPABI objects, that just results in wasted space. In addition,
14344 because we clear the D_PAGED bit, map_sections_to_segments will
14345 recognize that the program headers should not be mapped into any
14346 loadable segment. */
14347 abfd->flags &= ~D_PAGED;
14348 elf32_arm_begin_write_processing (abfd, link_info);
14352 elf32_arm_symbian_modify_segment_map (bfd *abfd,
14353 struct bfd_link_info *info)
14355 struct elf_segment_map *m;
14358 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14359 segment. However, because the .dynamic section is not marked
14360 with SEC_LOAD, the generic ELF code will not create such a segment. */
14362 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14365 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14366 if (m->p_type == PT_DYNAMIC)
14371 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14372 m->next = elf_tdata (abfd)->segment_map;
14373 elf_tdata (abfd)->segment_map = m;
14377 /* Also call the generic arm routine. */
14378 return elf32_arm_modify_segment_map (abfd, info);
14381 /* Return address for Ith PLT stub in section PLT, for relocation REL
14382 or (bfd_vma) -1 if it should not be included. */
14385 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14386 const arelent *rel ATTRIBUTE_UNUSED)
14388 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
14393 #define elf32_bed elf32_arm_symbian_bed
14395 /* The dynamic sections are not allocated on SymbianOS; the postlinker
14396 will process them and then discard them. */
14397 #undef ELF_DYNAMIC_SEC_FLAGS
14398 #define ELF_DYNAMIC_SEC_FLAGS \
14399 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14401 #undef elf_backend_add_symbol_hook
14402 #undef elf_backend_emit_relocs
14404 #undef bfd_elf32_bfd_link_hash_table_create
14405 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14406 #undef elf_backend_special_sections
14407 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14408 #undef elf_backend_begin_write_processing
14409 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14410 #undef elf_backend_final_write_processing
14411 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14413 #undef elf_backend_modify_segment_map
14414 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14416 /* There is no .got section for BPABI objects, and hence no header. */
14417 #undef elf_backend_got_header_size
14418 #define elf_backend_got_header_size 0
14420 /* Similarly, there is no .got.plt section. */
14421 #undef elf_backend_want_got_plt
14422 #define elf_backend_want_got_plt 0
14424 #undef elf_backend_plt_sym_val
14425 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14427 #undef elf_backend_may_use_rel_p
14428 #define elf_backend_may_use_rel_p 1
14429 #undef elf_backend_may_use_rela_p
14430 #define elf_backend_may_use_rela_p 0
14431 #undef elf_backend_default_use_rela_p
14432 #define elf_backend_default_use_rela_p 0
14433 #undef elf_backend_want_plt_sym
14434 #define elf_backend_want_plt_sym 0
14435 #undef ELF_MAXPAGESIZE
14436 #define ELF_MAXPAGESIZE 0x8000
14438 #include "elf32-target.h"