1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
#include "sysdep.h"
#include <limits.h>

#include "bfd.h"
#include "libiberty.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf-vxworks.h"
#include "elf/arm.h"
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_entry. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_entry. */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_entry. */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_entry. */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
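/* Illustrative sketch, not part of the original source: with a link hash
   table pointer whose use_rel flag has been set, the macros above let
   REL- and RELA-style objects share one code path, e.g.

     bfd_byte *ext = ...;                // one external reloc record
     Elf_Internal_Rela irel;
     SWAP_RELOC_IN (htab) (abfd, ext, &irel);
     ext += RELOC_SIZE (htab);           // step to the next record

   "htab", "ext" and "abfd" here are hypothetical locals used only for
   illustration.  */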
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);
/* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */

static reloc_howto_type elf32_arm_howto_table_1[] =
{
76 HOWTO (R_ARM_NONE, /* type */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
80 FALSE, /* pc_relative */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
88 FALSE), /* pcrel_offset */
90 HOWTO (R_ARM_PC24, /* type */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
94 TRUE, /* pc_relative */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
109 FALSE, /* pc_relative */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
124 TRUE, /* pc_relative */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
134 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
139 TRUE, /* pc_relative */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
154 FALSE, /* pc_relative */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
169 FALSE, /* pc_relative */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
179 HOWTO (R_ARM_THM_ABS5, /* type */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
183 FALSE, /* pc_relative */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
194 HOWTO (R_ARM_ABS8, /* type */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
198 FALSE, /* pc_relative */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
208 HOWTO (R_ARM_SBREL32, /* type */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
212 FALSE, /* pc_relative */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
222 HOWTO (R_ARM_THM_CALL, /* type */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
226 TRUE, /* pc_relative */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
236 HOWTO (R_ARM_THM_PC8, /* type */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
240 TRUE, /* pc_relative */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
250 HOWTO (R_ARM_BREL_ADJ, /* type */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
254 FALSE, /* pc_relative */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
264 HOWTO (R_ARM_SWI24, /* type */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
268 FALSE, /* pc_relative */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
278 HOWTO (R_ARM_THM_SWI8, /* type */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
282 FALSE, /* pc_relative */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
286 "R_ARM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
297 TRUE, /* pc_relative */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
312 TRUE, /* pc_relative */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
322 /* Dynamic TLS relocations. */
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
328 FALSE, /* pc_relative */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
342 FALSE, /* pc_relative */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
356 FALSE, /* pc_relative */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
366 /* Relocs used in ARM Linux */
368 HOWTO (R_ARM_COPY, /* type */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
372 FALSE, /* pc_relative */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
382 HOWTO (R_ARM_GLOB_DAT, /* type */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
386 FALSE, /* pc_relative */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
400 FALSE, /* pc_relative */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
410 HOWTO (R_ARM_RELATIVE, /* type */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
414 FALSE, /* pc_relative */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
424 HOWTO (R_ARM_GOTOFF32, /* type */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
428 FALSE, /* pc_relative */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
438 HOWTO (R_ARM_GOTPC, /* type */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
442 TRUE, /* pc_relative */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
452 HOWTO (R_ARM_GOT32, /* type */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
456 FALSE, /* pc_relative */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
466 HOWTO (R_ARM_PLT32, /* type */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
470 TRUE, /* pc_relative */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
480 HOWTO (R_ARM_CALL, /* type */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
484 TRUE, /* pc_relative */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
494 HOWTO (R_ARM_JUMP24, /* type */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
498 TRUE, /* pc_relative */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
508 HOWTO (R_ARM_THM_JUMP24, /* type */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
512 TRUE, /* pc_relative */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
522 HOWTO (R_ARM_BASE_ABS, /* type */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
526 FALSE, /* pc_relative */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
540 TRUE, /* pc_relative */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
554 TRUE, /* pc_relative */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
568 TRUE, /* pc_relative */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
582 FALSE, /* pc_relative */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
596 FALSE, /* pc_relative */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
610 FALSE, /* pc_relative */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
620 HOWTO (R_ARM_TARGET1, /* type */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
624 FALSE, /* pc_relative */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
634 HOWTO (R_ARM_ROSEGREL32, /* type */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
638 FALSE, /* pc_relative */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
648 HOWTO (R_ARM_V4BX, /* type */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
652 FALSE, /* pc_relative */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
662 HOWTO (R_ARM_TARGET2, /* type */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
666 FALSE, /* pc_relative */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
676 HOWTO (R_ARM_PREL31, /* type */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
680 TRUE, /* pc_relative */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
694 FALSE, /* pc_relative */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
704 HOWTO (R_ARM_MOVT_ABS, /* type */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
708 FALSE, /* pc_relative */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
722 TRUE, /* pc_relative */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
732 HOWTO (R_ARM_MOVT_PREL, /* type */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
736 TRUE, /* pc_relative */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
750 FALSE, /* pc_relative */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
764 FALSE, /* pc_relative */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
778 TRUE, /* pc_relative */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
792 TRUE, /* pc_relative */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
802 HOWTO (R_ARM_THM_JUMP19, /* type */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
806 TRUE, /* pc_relative */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
816 HOWTO (R_ARM_THM_JUMP6, /* type */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
820 TRUE, /* pc_relative */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
  /* These are declared as 13-bit signed relocations because we can
     address -4095 .. 4095(base) by altering ADDW to SUBW or vice
     versa.  */
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
837 TRUE, /* pc_relative */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
847 HOWTO (R_ARM_THM_PC12, /* type */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
851 TRUE, /* pc_relative */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
861 HOWTO (R_ARM_ABS32_NOI, /* type */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
865 FALSE, /* pc_relative */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
875 HOWTO (R_ARM_REL32_NOI, /* type */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
879 TRUE, /* pc_relative */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
889 /* Group relocations. */
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
895 TRUE, /* pc_relative */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
909 TRUE, /* pc_relative */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
923 TRUE, /* pc_relative */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
937 TRUE, /* pc_relative */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
951 TRUE, /* pc_relative */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
965 TRUE, /* pc_relative */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
979 TRUE, /* pc_relative */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
993 TRUE, /* pc_relative */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1007 TRUE, /* pc_relative */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1021 TRUE, /* pc_relative */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1035 TRUE, /* pc_relative */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1049 TRUE, /* pc_relative */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1063 TRUE, /* pc_relative */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1077 TRUE, /* pc_relative */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1091 TRUE, /* pc_relative */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1105 TRUE, /* pc_relative */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1119 TRUE, /* pc_relative */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1133 TRUE, /* pc_relative */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1147 TRUE, /* pc_relative */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1161 TRUE, /* pc_relative */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1175 TRUE, /* pc_relative */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1189 TRUE, /* pc_relative */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1203 TRUE, /* pc_relative */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1217 TRUE, /* pc_relative */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1231 TRUE, /* pc_relative */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1245 TRUE, /* pc_relative */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1259 TRUE, /* pc_relative */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1269 /* End of group relocations. */
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1275 FALSE, /* pc_relative */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1289 FALSE, /* pc_relative */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1303 FALSE, /* pc_relative */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1317 FALSE, /* pc_relative */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1331 FALSE, /* pc_relative */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1345 FALSE, /* pc_relative */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
  EMPTY_HOWTO (90),			/* Unallocated.  */
  EMPTY_HOWTO (91),			/* Unallocated.  */
  EMPTY_HOWTO (92),			/* Unallocated.  */
  EMPTY_HOWTO (93),			/* Unallocated.  */
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 FALSE, /* pc_relative */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 FALSE, /* pc_relative */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1392 TRUE, /* pc_relative */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1406 FALSE, /* pc_relative */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1420 FALSE, /* pc_relative */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1437 FALSE, /* pc_relative */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1445 FALSE), /* pcrel_offset */
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1452 FALSE, /* pc_relative */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1460 FALSE), /* pcrel_offset */
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1466 TRUE, /* pc_relative */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1480 TRUE, /* pc_relative */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1495 FALSE, /* pc_relative */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1509 FALSE, /* pc_relative */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1523 FALSE, /* pc_relative */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1537 FALSE, /* pc_relative */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1551 FALSE, /* pc_relative */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1565 FALSE, /* pc_relative */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1579 FALSE, /* pc_relative */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1593 FALSE, /* pc_relative */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
	 FALSE),		/* pcrel_offset */
};
1604 /* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1608 249-255 extended, currently unused, relocations: */
static reloc_howto_type elf32_arm_howto_table_2[4] =
{
1612 HOWTO (R_ARM_RREL32, /* type */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1616 FALSE, /* pc_relative */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1624 FALSE), /* pcrel_offset */
1626 HOWTO (R_ARM_RABS32, /* type */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1630 FALSE, /* pc_relative */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1638 FALSE), /* pcrel_offset */
1640 HOWTO (R_ARM_RPC24, /* type */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1644 FALSE, /* pc_relative */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1652 FALSE), /* pcrel_offset */
1654 HOWTO (R_ARM_RBASE, /* type */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1658 FALSE, /* pc_relative */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
	 FALSE)			/* pcrel_offset */
};

static reloc_howto_type *
elf32_arm_howto_from_type (unsigned int r_type)
{
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
    return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];

  return NULL;
}

static void
elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
			 Elf_Internal_Rela * elf_reloc)
{
  unsigned int r_type;

  r_type = ELF32_R_TYPE (elf_reloc->r_info);
  bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
}
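/* Illustrative note, not part of the original source: table 1 is indexed
   directly by relocation number, so a lookup is a constant-time array
   access, e.g.

     reloc_howto_type *h = elf32_arm_howto_from_type (R_ARM_ABS32);
     // h->name is "R_ARM_ABS32" and h->dst_mask is 0xffffffff

   Types that fall outside both tables yield NULL, which callers are
   expected to check.  */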
struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type  bfd_reloc_val;
    unsigned char	      elf_reloc_val;
  };

/* All entries in this list must also be present in elf32_arm_howto_table.  */
static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
  {
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1725 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1726 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1727 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1728 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1729 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1730 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1731 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1732 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1733 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1734 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1735 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1736 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1737 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1738 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1739 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1740 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1741 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1742 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1743 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1744 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1745 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1746 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1747 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1748 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1749 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1750 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1751 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1752 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1753 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1754 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1755 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1756 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1757 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1758 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1759 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1760 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1761 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1762 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1763 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1764 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1765 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1766 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1767 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1768 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1769 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1770 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1771 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1772 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1773 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1774 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1775 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1776 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1777 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1778 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
    {BFD_RELOC_ARM_V4BX,	     R_ARM_V4BX}
  };

static reloc_howto_type *
elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			     bfd_reloc_code_real_type code)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
    if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
      return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);

  return NULL;
}
1795 static reloc_howto_type *
elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			     const char *r_name)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
    if (elf32_arm_howto_table_1[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
      return &elf32_arm_howto_table_1[i];

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
    if (elf32_arm_howto_table_2[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
      return &elf32_arm_howto_table_2[i];

  return NULL;
}
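/* Illustrative note, not part of the original source: these two routines
   back BFD's generic lookup entry points, so a client normally writes
   something like

     reloc_howto_type *h = bfd_reloc_type_lookup (abfd, BFD_RELOC_32);
     // resolved through elf32_arm_reloc_map to the R_ARM_ABS32 howto

   rather than calling the elf32_arm_* helpers directly.  */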
1814 /* Support for core dump NOTE sections. */
static bfd_boolean
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
      default:
	return FALSE;

      case 148:		/* Linux/ARM 32-bit.  */
	/* pr_cursig */
	elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);

	/* pr_pid */
	elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);

	/* pr_reg */
	offset = 72;
	size = 72;

	break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
static bfd_boolean
elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
{
  switch (note->descsz)
    {
      default:
	return FALSE;

      case 124:		/* Linux/ARM elf_prpsinfo.  */
	elf_tdata (abfd)->core_program
	  = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
	elf_tdata (abfd)->core_command
	  = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
    }

  /* Note that for some reason, a spurious space is tacked
     onto the end of the args in some (at least one anyway)
     implementations, so strip it off if it exists.  */
  {
    char *command = elf_tdata (abfd)->core_command;
    int n = strlen (command);

    if (0 < n && command[n - 1] == ' ')
      command[n - 1] = '\0';
  }

  return TRUE;
}
1875 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1876 #define TARGET_LITTLE_NAME "elf32-littlearm"
1877 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1878 #define TARGET_BIG_NAME "elf32-bigarm"
1880 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1881 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1883 typedef unsigned long int insn32;
1884 typedef unsigned short int insn16;
/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
1889 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1890 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1891 || ((abfd)->flags & BFD_LINKER_CREATED))
1893 /* The linker script knows the section names for placement.
1894 The entry_names are used to do simple name mangling on the stubs.
1895 Given a function name, and its type, the stub can be found. The
1896 name can be changed. The only requirement is the %s be present. */
1897 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1898 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1900 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1901 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1903 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1904 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1906 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1907 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1909 #define STUB_ENTRY_NAME "__%s_veneer"
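/* Illustrative sketch, not part of the original source: the *_ENTRY_NAME
   strings above are printf-style templates, so a glue/veneer symbol name
   is formed along the lines of

     char name[64];                                      // hypothetical buffer
     sprintf (name, ARM2THUMB_GLUE_ENTRY_NAME, "foo");   // "__foo_from_arm"
     sprintf (name, STUB_ENTRY_NAME, "bar");             // "__bar_veneer"

   Only the %s (or %x/%d) conversion is required; the surrounding text may
   be changed without affecting the stub-lookup logic.  */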
/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
1915 #ifdef FOUR_WORD_PLT
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
1923 0xe52de004, /* str lr, [sp, #-4]! */
1924 0xe59fe010, /* ldr lr, [pc, #16] */
1925 0xe08fe00e, /* add lr, pc, lr */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
};

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
1933 0xe28fc600, /* add ip, pc, #NN */
1934 0xe28cca00, /* add ip, ip, #NN */
1935 0xe5bcf000, /* ldr pc, [ip, #NN]! */
  0x00000000,		/* unused		*/
};

#else

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
1947 0xe52de004, /* str lr, [sp, #-4]! */
1948 0xe59fe004, /* ldr lr, [pc, #4] */
1949 0xe08fe00e, /* add lr, pc, lr */
1950 0xe5bef008, /* ldr pc, [lr, #8]! */
  0x00000000,		/* &GOT[0] - .          */
};

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
1958 0xe28fc600, /* add ip, pc, #0xNN00000 */
1959 0xe28cca00, /* add ip, ip, #0xNN000 */
  0xe5bcf000,		/* ldr   pc, [ip, #0xNNN]! */
};

#endif

/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
{
1969 0xe52dc008, /* str ip,[sp,#-8]! */
1970 0xe59fc000, /* ldr ip,[pc] */
1971 0xe59cf008, /* ldr pc,[ip,#8] */
  0x00000000,           /* .long _GLOBAL_OFFSET_TABLE_ */
};

/* The format of subsequent entries in a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
{
1978 0xe59fc000, /* ldr ip,[pc] */
1979 0xe59cf000, /* ldr pc,[ip] */
1980 0x00000000, /* .long @got */
1981 0xe59fc000, /* ldr ip,[pc] */
1982 0xea000000, /* b _PLT */
  0x00000000,           /* .long @pltindex*sizeof(Elf32_Rela) */
};

/* The format of entries in a VxWorks shared library.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
{
1989 0xe59fc000, /* ldr ip,[pc] */
1990 0xe79cf009, /* ldr pc,[ip,r9] */
1991 0x00000000, /* .long @got */
1992 0xe59fc000, /* ldr ip,[pc] */
1993 0xe599f008, /* ldr pc,[r9,#8] */
  0x00000000,           /* .long @pltindex*sizeof(Elf32_Rela) */
};

/* An initial stub used if the PLT entry is referenced from Thumb code.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop   */
};
/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
{
2009 0xe51ff004, /* ldr pc, [pc, #-4] */
  0x00000000,		/* dcd   R_ARM_GLOB_DAT(X) */
};
2013 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2014 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2015 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2016 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2017 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2018 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
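/* Illustrative derivation: these limits follow from the branch encodings
   and the PC bias. An ARM B/BL carries a signed 24-bit word offset and the
   PC reads as the instruction address + 8, so for example:

     ARM_MAX_FWD_BRANCH_OFFSET = ((1 << 23) - 1) * 4 + 8 = 0x01fffffc + 8

   The Thumb limits use the same pattern with halfword granularity and a
   4-byte PC bias. A hypothetical range check against these macros:

     static bfd_boolean
     arm_branch_in_range (bfd_signed_vma offset)
     {
       return (offset <= ARM_MAX_FWD_BRANCH_OFFSET
               && offset >= ARM_MAX_BWD_BRANCH_OFFSET);
     }

   This mirrors the comparisons made in arm_type_of_stub below. */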
2028 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2029 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2030 is inserted in arm_build_one_stub(). */
2031 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2032 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2033 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2034 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2035 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2036 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2041 enum stub_insn_type type;
2042 unsigned int r_type;
2046 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2047 to reach the stub if necessary. */
2048 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2050 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2051 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2054 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not available. */
2056 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2058 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2059 ARM_INSN(0xe12fff1c), /* bx ip */
2060 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2063 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2064 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2066 THUMB16_INSN(0xb401), /* push {r0} */
2067 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2068 THUMB16_INSN(0x4684), /* mov ip, r0 */
2069 THUMB16_INSN(0xbc01), /* pop {r0} */
2070 THUMB16_INSN(0x4760), /* bx ip */
2071 THUMB16_INSN(0xbf00), /* nop */
2072 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2075 /* V4T Thumb -> Thumb long branch stub. Using the stack is not allowed. */
2077 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2079 THUMB16_INSN(0x4778), /* bx pc */
2080 THUMB16_INSN(0x46c0), /* nop */
2081 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2082 ARM_INSN(0xe12fff1c), /* bx ip */
2083 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2086 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not available. */
2088 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2090 THUMB16_INSN(0x4778), /* bx pc */
2091 THUMB16_INSN(0x46c0), /* nop */
2092 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2093 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2096 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2097 one, when the destination is close enough. */
2098 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2100 THUMB16_INSN(0x4778), /* bx pc */
2101 THUMB16_INSN(0x46c0), /* nop */
2102 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2105 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2106 blx to reach the stub if necessary. */
2107 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2109 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2110 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2111 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
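/* Illustrative arithmetic (not part of the original comment): the -4 addend
   cancels the ARM PC bias. If the stub starts at S, the data word lives at
   S + 8 and R_ARM_REL32 with addend -4 stores X - 4 - (S + 8). When the
   "add pc, pc, ip" at S + 4 executes, PC reads as S + 12, so the target is

     (S + 12) + (X - 4 - (S + 8)) = X.  */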
2114 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2115 blx to reach the stub if necessary. We cannot add into pc;
2116 it is not guaranteed to mode switch (different in ARMv6 and ARMv7). */
2118 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2120 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2121 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2122 ARM_INSN(0xe12fff1c), /* bx ip */
2123 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2126 /* V4T ARM -> Thumb long branch stub, PIC. */
2127 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2129 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2130 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2131 ARM_INSN(0xe12fff1c), /* bx ip */
2132 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2135 /* V4T Thumb -> ARM long branch stub, PIC. */
2136 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2138 THUMB16_INSN(0x4778), /* bx pc */
2139 THUMB16_INSN(0x46c0), /* nop */
2140 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2141 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2142 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2145 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile architectures. */
2147 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2149 THUMB16_INSN(0xb401), /* push {r0} */
2150 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2151 THUMB16_INSN(0x46fc), /* mov ip, pc */
2152 THUMB16_INSN(0x4484), /* add ip, r0 */
2153 THUMB16_INSN(0xbc01), /* pop {r0} */
2154 THUMB16_INSN(0x4760), /* bx ip */
2155 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2158 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not allowed. */
2160 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2162 THUMB16_INSN(0x4778), /* bx pc */
2163 THUMB16_INSN(0x46c0), /* nop */
2164 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2165 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2166 ARM_INSN(0xe12fff1c), /* bx ip */
2167 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2170 /* Cortex-A8 erratum-workaround stubs. */
2172 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2173 can't use a conditional branch to reach this stub). */
2175 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2177 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2178 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2179 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2182 /* Stub used for b.w and bl.w instructions. */
2184 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2186 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2189 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2191 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2194 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2195 instruction (which switches to ARM mode) to point to this stub. Jump to the
2196 real destination using an ARM-mode branch. */
2198 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2200 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2203 /* Section name for stubs is the associated section name plus this string. */
2205 #define STUB_SUFFIX ".stub"
2207 /* One entry per long/short branch stub defined above. */
2209 DEF_STUB(long_branch_any_any) \
2210 DEF_STUB(long_branch_v4t_arm_thumb) \
2211 DEF_STUB(long_branch_thumb_only) \
2212 DEF_STUB(long_branch_v4t_thumb_thumb) \
2213 DEF_STUB(long_branch_v4t_thumb_arm) \
2214 DEF_STUB(short_branch_v4t_thumb_arm) \
2215 DEF_STUB(long_branch_any_arm_pic) \
2216 DEF_STUB(long_branch_any_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2220 DEF_STUB(long_branch_thumb_only_pic) \
2221 DEF_STUB(a8_veneer_b_cond) \
2222 DEF_STUB(a8_veneer_b) \
2223 DEF_STUB(a8_veneer_bl) \
2224 DEF_STUB(a8_veneer_blx)
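/* Illustrative note on the X-macro idiom used here: DEF_STUB is redefined
   before each expansion of the list above, so one list produces both the
   enum and the template table. Conceptually the two expansions look like:

     #define DEF_STUB(x) arm_stub_##x,
       ->  arm_stub_long_branch_any_any, arm_stub_long_branch_v4t_arm_thumb, ...

     #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
       ->  {elf32_arm_stub_long_branch_any_any,
            ARRAY_SIZE (elf32_arm_stub_long_branch_any_any)}, ...

   Keeping a single list guarantees the enum values and the table entries
   stay in the same order. */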
2226 #define DEF_STUB(x) arm_stub_##x,
2227 enum elf32_arm_stub_type {
2230 /* Note the first a8_veneer type */
2231 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2237 const insn_sequence* template_sequence;
2241 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2242 static const stub_def stub_definitions[] = {
2247 struct elf32_arm_stub_hash_entry
2249 /* Base hash table entry structure. */
2250 struct bfd_hash_entry root;
2252 /* The stub section. */
2255 /* Offset within stub_sec of the beginning of this stub. */
2256 bfd_vma stub_offset;
2258 /* Given the symbol's value and its section we can determine its final
2259 value when building the stubs (so the stub knows where to jump). */
2260 bfd_vma target_value;
2261 asection *target_section;
2263 /* Offset to apply to relocation referencing target_value. */
2264 bfd_vma target_addend;
2266 /* The instruction which caused this stub to be generated (only valid for
2267 Cortex-A8 erratum workaround stubs at present). */
2268 unsigned long orig_insn;
2270 /* The stub type. */
2271 enum elf32_arm_stub_type stub_type;
2272 /* Its encoding size in bytes. */
2275 const insn_sequence *stub_template;
2276 /* The size of the template (number of entries). */
2277 int stub_template_size;
2279 /* The symbol table entry, if any, that this was derived from. */
2280 struct elf32_arm_link_hash_entry *h;
2282 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2283 unsigned char st_type;
2285 /* Where this stub is being called from, or, in the case of combined
2286 stub sections, the first input section in the group. */
2289 /* The name for the local symbol at the start of this stub. The
2290 stub name in the hash table has to be unique; this does not, so
2291 it can be friendlier. */
2295 /* Used to build a map of a section. This is required for mixed-endian code/data. */
2298 typedef struct elf32_elf_section_map
2303 elf32_arm_section_map;
2305 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2309 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2310 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2311 VFP11_ERRATUM_ARM_VENEER,
2312 VFP11_ERRATUM_THUMB_VENEER
2314 elf32_vfp11_erratum_type;
2316 typedef struct elf32_vfp11_erratum_list
2318 struct elf32_vfp11_erratum_list *next;
2324 struct elf32_vfp11_erratum_list *veneer;
2325 unsigned int vfp_insn;
2329 struct elf32_vfp11_erratum_list *branch;
2333 elf32_vfp11_erratum_type type;
2335 elf32_vfp11_erratum_list;
2340 INSERT_EXIDX_CANTUNWIND_AT_END
2342 arm_unwind_edit_type;
2344 /* A (sorted) list of edits to apply to an unwind table. */
2345 typedef struct arm_unwind_table_edit
2347 arm_unwind_edit_type type;
2348 /* Note: we sometimes want to insert an unwind entry corresponding to a
2349 section different from the one we're currently writing out, so record the
2350 (text) section this edit relates to here. */
2351 asection *linked_section;
2353 struct arm_unwind_table_edit *next;
2355 arm_unwind_table_edit;
2357 typedef struct _arm_elf_section_data
2359 /* Information about mapping symbols. */
2360 struct bfd_elf_section_data elf;
2361 unsigned int mapcount;
2362 unsigned int mapsize;
2363 elf32_arm_section_map *map;
2364 /* Information about CPU errata. */
2365 unsigned int erratumcount;
2366 elf32_vfp11_erratum_list *erratumlist;
2367 /* Information about unwind tables. */
2370 /* Unwind info attached to a text section. */
2373 asection *arm_exidx_sec;
2376 /* Unwind info attached to an .ARM.exidx section. */
2379 arm_unwind_table_edit *unwind_edit_list;
2380 arm_unwind_table_edit *unwind_edit_tail;
2384 _arm_elf_section_data;
2386 #define elf32_arm_section_data(sec) \
2387 ((_arm_elf_section_data *) elf_section_data (sec))
2389 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2390 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2391 so may be created multiple times: we use an array of these entries whilst
2392 relaxing which we can refresh easily, then create stubs for each potentially
2393 erratum-triggering instruction once we've settled on a solution. */
2395 struct a8_erratum_fix {
2400 unsigned long orig_insn;
2402 enum elf32_arm_stub_type stub_type;
2406 /* A table of relocs applied to branches which might trigger the Cortex-A8 erratum. */
2409 struct a8_erratum_reloc {
2411 bfd_vma destination;
2412 unsigned int r_type;
2413 unsigned char st_type;
2414 const char *sym_name;
2415 bfd_boolean non_a8_stub;
2418 /* The size of the thread control block. */
2421 struct elf_arm_obj_tdata
2423 struct elf_obj_tdata root;
2425 /* tls_type for each local got entry. */
2426 char *local_got_tls_type;
2428 /* Zero to warn when linking objects with incompatible enum sizes. */
2429 int no_enum_size_warning;
2431 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2432 int no_wchar_size_warning;
2435 #define elf_arm_tdata(bfd) \
2436 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2438 #define elf32_arm_local_got_tls_type(bfd) \
2439 (elf_arm_tdata (bfd)->local_got_tls_type)
2441 #define is_arm_elf(bfd) \
2442 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2443 && elf_tdata (bfd) != NULL \
2444 && elf_object_id (bfd) == ARM_ELF_DATA)
2447 elf32_arm_mkobject (bfd *abfd)
2449 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2453 /* The ARM linker needs to keep track of the number of relocs that it
2454 decides to copy in check_relocs for each symbol. This is so that
2455 it can discard PC relative relocs if it doesn't need them when
2456 linking with -Bsymbolic. We store the information in a field
2457 extending the regular ELF linker hash table. */
2459 /* This structure keeps track of the number of relocs we have copied
2460 for a given symbol. */
2461 struct elf32_arm_relocs_copied
2464 struct elf32_arm_relocs_copied * next;
2465 /* A section in dynobj. */
2467 /* Number of relocs copied in this section. */
2468 bfd_size_type count;
2469 /* Number of PC-relative relocs copied in this section. */
2470 bfd_size_type pc_count;
2473 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2475 /* Arm ELF linker hash entry. */
2476 struct elf32_arm_link_hash_entry
2478 struct elf_link_hash_entry root;
2480 /* Number of PC relative relocs copied for this symbol. */
2481 struct elf32_arm_relocs_copied * relocs_copied;
2483 /* We reference count Thumb references to a PLT entry separately,
2484 so that we can emit the Thumb trampoline only if needed. */
2485 bfd_signed_vma plt_thumb_refcount;
2487 /* Some references from Thumb code may be eliminated by BL->BLX
2488 conversion, so record them separately. */
2489 bfd_signed_vma plt_maybe_thumb_refcount;
2491 /* Since PLT entries have variable size if the Thumb prologue is
2492 used, we need to record the index into .got.plt instead of
2493 recomputing it from the PLT offset. */
2494 bfd_signed_vma plt_got_offset;
2496 #define GOT_UNKNOWN 0
2497 #define GOT_NORMAL 1
2498 #define GOT_TLS_GD 2
2499 #define GOT_TLS_IE 4
2500 unsigned char tls_type;
2502 /* The symbol marking the real symbol location for exported thumb
2503 symbols with Arm stubs. */
2504 struct elf_link_hash_entry *export_glue;
2506 /* A pointer to the most recently used stub hash entry against this symbol. */
2508 struct elf32_arm_stub_hash_entry *stub_cache;
2511 /* Traverse an arm ELF linker hash table. */
2512 #define elf32_arm_link_hash_traverse(table, func, info) \
2513 (elf_link_hash_traverse \
2515 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2518 /* Get the ARM elf linker hash table from a link_info structure. */
2519 #define elf32_arm_hash_table(info) \
2520 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2521 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2523 #define arm_stub_hash_lookup(table, string, create, copy) \
2524 ((struct elf32_arm_stub_hash_entry *) \
2525 bfd_hash_lookup ((table), (string), (create), (copy)))
2527 /* Array to keep track of which stub sections have been created, and
2528 information on stub grouping. */
2531 /* This is the section to which stubs in the group will be attached. */
2534 /* The stub section. */
2538 /* ARM ELF linker hash table. */
2539 struct elf32_arm_link_hash_table
2541 /* The main hash table. */
2542 struct elf_link_hash_table root;
2544 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2545 bfd_size_type thumb_glue_size;
2547 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2548 bfd_size_type arm_glue_size;
2550 /* The size in bytes of the section containing the ARMv4 BX veneers. */
2551 bfd_size_type bx_glue_size;
2553 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2554 veneer has been populated. */
2555 bfd_vma bx_glue_offset[15];
2557 /* The size in bytes of the section containing glue for VFP11 erratum veneers. */
2559 bfd_size_type vfp11_erratum_glue_size;
2561 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2562 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2563 elf32_arm_write_section(). */
2564 struct a8_erratum_fix *a8_erratum_fixes;
2565 unsigned int num_a8_erratum_fixes;
2567 /* An arbitrary input BFD chosen to hold the glue sections. */
2568 bfd * bfd_of_glue_owner;
2570 /* Nonzero to output a BE8 image. */
2573 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2574 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2577 /* The relocation to use for R_ARM_TARGET2 relocations. */
2580 /* 0 = Ignore R_ARM_V4BX.
2581 1 = Convert BX to MOV PC.
2582 2 = Generate v4 interworking stubs. */
2585 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2588 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2591 /* What sort of code sequences we should look for which may trigger the
2592 VFP11 denorm erratum. */
2593 bfd_arm_vfp11_fix vfp11_fix;
2595 /* Global counter for the number of fixes we have emitted. */
2596 int num_vfp11_fixes;
2598 /* Nonzero to force PIC branch veneers. */
2601 /* The number of bytes in the initial entry in the PLT. */
2602 bfd_size_type plt_header_size;
2604 /* The number of bytes in the subsequent PLT entries. */
2605 bfd_size_type plt_entry_size;
2607 /* True if the target system is VxWorks. */
2610 /* True if the target system is Symbian OS. */
2613 /* True if the target uses REL relocations. */
2616 /* Short-cuts to get to dynamic linker sections. */
2625 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2628 /* Data for R_ARM_TLS_LDM32 relocations. */
2631 bfd_signed_vma refcount;
2635 /* Small local sym cache. */
2636 struct sym_cache sym_cache;
2638 /* For convenience in allocate_dynrelocs. */
2641 /* The stub hash table. */
2642 struct bfd_hash_table stub_hash_table;
2644 /* Linker stub bfd. */
2647 /* Linker call-backs. */
2648 asection * (*add_stub_section) (const char *, asection *);
2649 void (*layout_sections_again) (void);
2651 /* Array to keep track of which stub sections have been created, and
2652 information on stub grouping. */
2653 struct map_stub *stub_group;
2655 /* Number of elements in stub_group. */
2658 /* Assorted information used by elf32_arm_size_stubs. */
2659 unsigned int bfd_count;
2661 asection **input_list;
2664 /* Create an entry in an ARM ELF linker hash table. */
2666 static struct bfd_hash_entry *
2667 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2668 struct bfd_hash_table * table,
2669 const char * string)
2671 struct elf32_arm_link_hash_entry * ret =
2672 (struct elf32_arm_link_hash_entry *) entry;
2674 /* Allocate the structure if it has not already been allocated by a subclass. */
2677 ret = (struct elf32_arm_link_hash_entry *)
2678 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2680 return (struct bfd_hash_entry *) ret;
2682 /* Call the allocation method of the superclass. */
2683 ret = ((struct elf32_arm_link_hash_entry *)
2684 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2688 ret->relocs_copied = NULL;
2689 ret->tls_type = GOT_UNKNOWN;
2690 ret->plt_thumb_refcount = 0;
2691 ret->plt_maybe_thumb_refcount = 0;
2692 ret->plt_got_offset = -1;
2693 ret->export_glue = NULL;
2695 ret->stub_cache = NULL;
2698 return (struct bfd_hash_entry *) ret;
2701 /* Initialize an entry in the stub hash table. */
2703 static struct bfd_hash_entry *
2704 stub_hash_newfunc (struct bfd_hash_entry *entry,
2705 struct bfd_hash_table *table,
2708 /* Allocate the structure if it has not already been allocated by a subclass. */
2712 entry = (struct bfd_hash_entry *)
2713 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2718 /* Call the allocation method of the superclass. */
2719 entry = bfd_hash_newfunc (entry, table, string);
2722 struct elf32_arm_stub_hash_entry *eh;
2724 /* Initialize the local fields. */
2725 eh = (struct elf32_arm_stub_hash_entry *) entry;
2726 eh->stub_sec = NULL;
2727 eh->stub_offset = 0;
2728 eh->target_value = 0;
2729 eh->target_section = NULL;
2730 eh->target_addend = 0;
2732 eh->stub_type = arm_stub_none;
2734 eh->stub_template = NULL;
2735 eh->stub_template_size = 0;
2738 eh->output_name = NULL;
2744 /* Create .got, .got.plt, and .rel(a).got sections in DYNOBJ, and set up
2745 shortcuts to them in our hash table. */
2748 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2750 struct elf32_arm_link_hash_table *htab;
2752 htab = elf32_arm_hash_table (info);
2756 /* BPABI objects never have a GOT, or associated sections. */
2757 if (htab->symbian_p)
2760 if (! _bfd_elf_create_got_section (dynobj, info))
2763 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2764 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2765 if (!htab->sgot || !htab->sgotplt)
2768 htab->srelgot = bfd_get_section_by_name (dynobj,
2769 RELOC_SECTION (htab, ".got"));
2770 if (htab->srelgot == NULL)
2775 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2776 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our hash table. */
2780 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2782 struct elf32_arm_link_hash_table *htab;
2784 htab = elf32_arm_hash_table (info);
2788 if (!htab->sgot && !create_got_section (dynobj, info))
2791 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2794 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2795 htab->srelplt = bfd_get_section_by_name (dynobj,
2796 RELOC_SECTION (htab, ".plt"));
2797 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2799 htab->srelbss = bfd_get_section_by_name (dynobj,
2800 RELOC_SECTION (htab, ".bss"));
2802 if (htab->vxworks_p)
2804 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2809 htab->plt_header_size = 0;
2810 htab->plt_entry_size
2811 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2815 htab->plt_header_size
2816 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2817 htab->plt_entry_size
2818 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2825 || (!info->shared && !htab->srelbss))
2831 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2834 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2835 struct elf_link_hash_entry *dir,
2836 struct elf_link_hash_entry *ind)
2838 struct elf32_arm_link_hash_entry *edir, *eind;
2840 edir = (struct elf32_arm_link_hash_entry *) dir;
2841 eind = (struct elf32_arm_link_hash_entry *) ind;
2843 if (eind->relocs_copied != NULL)
2845 if (edir->relocs_copied != NULL)
2847 struct elf32_arm_relocs_copied **pp;
2848 struct elf32_arm_relocs_copied *p;
2850 /* Add reloc counts against the indirect sym to the direct sym
2851 list. Merge any entries against the same section. */
2852 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2854 struct elf32_arm_relocs_copied *q;
2856 for (q = edir->relocs_copied; q != NULL; q = q->next)
2857 if (q->section == p->section)
2859 q->pc_count += p->pc_count;
2860 q->count += p->count;
2867 *pp = edir->relocs_copied;
2870 edir->relocs_copied = eind->relocs_copied;
2871 eind->relocs_copied = NULL;
2874 if (ind->root.type == bfd_link_hash_indirect)
2876 /* Copy over PLT info. */
2877 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2878 eind->plt_thumb_refcount = 0;
2879 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2880 eind->plt_maybe_thumb_refcount = 0;
2882 if (dir->got.refcount <= 0)
2884 edir->tls_type = eind->tls_type;
2885 eind->tls_type = GOT_UNKNOWN;
2889 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2892 /* Create an ARM elf linker hash table. */
2894 static struct bfd_link_hash_table *
2895 elf32_arm_link_hash_table_create (bfd *abfd)
2897 struct elf32_arm_link_hash_table *ret;
2898 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2900 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2904 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2905 elf32_arm_link_hash_newfunc,
2906 sizeof (struct elf32_arm_link_hash_entry),
2914 ret->sgotplt = NULL;
2915 ret->srelgot = NULL;
2917 ret->srelplt = NULL;
2918 ret->sdynbss = NULL;
2919 ret->srelbss = NULL;
2920 ret->srelplt2 = NULL;
2921 ret->thumb_glue_size = 0;
2922 ret->arm_glue_size = 0;
2923 ret->bx_glue_size = 0;
2924 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2925 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2926 ret->vfp11_erratum_glue_size = 0;
2927 ret->num_vfp11_fixes = 0;
2928 ret->fix_cortex_a8 = 0;
2929 ret->bfd_of_glue_owner = NULL;
2930 ret->byteswap_code = 0;
2931 ret->target1_is_rel = 0;
2932 ret->target2_reloc = R_ARM_NONE;
2933 #ifdef FOUR_WORD_PLT
2934 ret->plt_header_size = 16;
2935 ret->plt_entry_size = 16;
2937 ret->plt_header_size = 20;
2938 ret->plt_entry_size = 12;
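/* Note for clarity: these byte counts appear to match the PLT templates
   above; with FOUR_WORD_PLT both the first and subsequent entries are four
   words (16 bytes), otherwise the first entry is five words (20 bytes) and
   each subsequent entry three words (12 bytes). */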
2945 ret->sym_cache.abfd = NULL;
2947 ret->tls_ldm_got.refcount = 0;
2948 ret->stub_bfd = NULL;
2949 ret->add_stub_section = NULL;
2950 ret->layout_sections_again = NULL;
2951 ret->stub_group = NULL;
2955 ret->input_list = NULL;
2957 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2958 sizeof (struct elf32_arm_stub_hash_entry)))
2964 return &ret->root.root;
2967 /* Free the derived linker hash table. */
2970 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2972 struct elf32_arm_link_hash_table *ret
2973 = (struct elf32_arm_link_hash_table *) hash;
2975 bfd_hash_table_free (&ret->stub_hash_table);
2976 _bfd_generic_link_hash_table_free (hash);
2979 /* Determine if we're dealing with a Thumb only architecture. */
2982 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2984 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2988 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
2991 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2992 Tag_CPU_arch_profile);
2994 return profile == 'M';
2997 /* Determine if we're dealing with a Thumb-2 object. */
3000 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3002 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3004 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3007 /* Determine what kind of NOPs are available. */
3010 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3012 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3014 return arch == TAG_CPU_ARCH_V6T2
3015 || arch == TAG_CPU_ARCH_V6K
3016 || arch == TAG_CPU_ARCH_V7
3017 || arch == TAG_CPU_ARCH_V7E_M;
3021 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3023 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3025 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3026 || arch == TAG_CPU_ARCH_V7E_M);
3030 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3034 case arm_stub_long_branch_thumb_only:
3035 case arm_stub_long_branch_v4t_thumb_arm:
3036 case arm_stub_short_branch_v4t_thumb_arm:
3037 case arm_stub_long_branch_v4t_thumb_arm_pic:
3038 case arm_stub_long_branch_thumb_only_pic:
3049 /* Determine the type of stub needed, if any, for a call. */
3051 static enum elf32_arm_stub_type
3052 arm_type_of_stub (struct bfd_link_info *info,
3053 asection *input_sec,
3054 const Elf_Internal_Rela *rel,
3055 int *actual_st_type,
3056 struct elf32_arm_link_hash_entry *hash,
3057 bfd_vma destination,
3063 bfd_signed_vma branch_offset;
3064 unsigned int r_type;
3065 struct elf32_arm_link_hash_table * globals;
3068 enum elf32_arm_stub_type stub_type = arm_stub_none;
3070 int st_type = *actual_st_type;
3072 /* We don't know the actual type of destination in case it is of
3073 type STT_SECTION: give up. */
3074 if (st_type == STT_SECTION)
3077 globals = elf32_arm_hash_table (info);
3078 if (globals == NULL)
3081 thumb_only = using_thumb_only (globals);
3083 thumb2 = using_thumb2 (globals);
3085 /* Determine where the call point is. */
3086 location = (input_sec->output_offset
3087 + input_sec->output_section->vma
3090 r_type = ELF32_R_TYPE (rel->r_info);
3092 /* Keep a simpler condition, for the sake of clarity. */
3093 if (globals->splt != NULL
3095 && hash->root.plt.offset != (bfd_vma) -1)
3099 /* Note when dealing with PLT entries: the main PLT stub is in
3100 ARM mode, so if the branch is in Thumb mode, another
3101 Thumb->ARM stub will be inserted later just before the ARM
3102 PLT stub. We don't take this extra distance into account
3103 here, because if a long branch stub is needed, we'll add a
3104 Thumb->Arm one and branch directly to the ARM PLT entry
3105 because it avoids spreading offset corrections in several places. */
3108 destination = (globals->splt->output_section->vma
3109 + globals->splt->output_offset
3110 + hash->root.plt.offset);
3114 branch_offset = (bfd_signed_vma)(destination - location);
3116 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3118 /* Handle cases where:
3119 - this call goes too far (different Thumb/Thumb2 max distance)
3121 - it's a Thumb->Arm call and blx is not available, or it's a
3122 Thumb->Arm branch (not bl). A stub is needed in this case,
3123 but only if this call is not through a PLT entry. Indeed,
3124 PLT stubs handle mode switching already.
3127 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3128 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3130 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3131 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3132 || ((st_type != STT_ARM_TFUNC)
3133 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3134 || (r_type == R_ARM_THM_JUMP24))
3137 if (st_type == STT_ARM_TFUNC)
3139 /* Thumb to thumb. */
3142 stub_type = (info->shared | globals->pic_veneer)
3144 ? ((globals->use_blx
3145 && (r_type == R_ARM_THM_CALL))
3146 /* V5T and above. Stub starts with ARM code, so
3147 we must be able to switch mode before
3148 reaching it, which is only possible for 'bl'
3149 (ie R_ARM_THM_CALL relocation). */
3150 ? arm_stub_long_branch_any_thumb_pic
3151 /* On V4T, use Thumb code only. */
3152 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3154 /* non-PIC stubs. */
3155 : ((globals->use_blx
3156 && (r_type == R_ARM_THM_CALL))
3157 /* V5T and above. */
3158 ? arm_stub_long_branch_any_any
3160 : arm_stub_long_branch_v4t_thumb_thumb);
3164 stub_type = (info->shared | globals->pic_veneer)
3166 ? arm_stub_long_branch_thumb_only_pic
3168 : arm_stub_long_branch_thumb_only;
3175 && sym_sec->owner != NULL
3176 && !INTERWORK_FLAG (sym_sec->owner))
3178 (*_bfd_error_handler)
3179 (_("%B(%s): warning: interworking not enabled.\n"
3180 " first occurrence: %B: Thumb call to ARM"),
3181 sym_sec->owner, input_bfd, name);
3184 stub_type = (info->shared | globals->pic_veneer)
3186 ? ((globals->use_blx
3187 && (r_type == R_ARM_THM_CALL))
3188 /* V5T and above. */
3189 ? arm_stub_long_branch_any_arm_pic
3191 : arm_stub_long_branch_v4t_thumb_arm_pic)
3193 /* non-PIC stubs. */
3194 : ((globals->use_blx
3195 && (r_type == R_ARM_THM_CALL))
3196 /* V5T and above. */
3197 ? arm_stub_long_branch_any_any
3199 : arm_stub_long_branch_v4t_thumb_arm);
3201 /* Handle v4t short branches. */
3202 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3203 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3204 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3205 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3209 else if (r_type == R_ARM_CALL
3210 || r_type == R_ARM_JUMP24
3211 || r_type == R_ARM_PLT32)
3213 if (st_type == STT_ARM_TFUNC)
3218 && sym_sec->owner != NULL
3219 && !INTERWORK_FLAG (sym_sec->owner))
3221 (*_bfd_error_handler)
3222 (_("%B(%s): warning: interworking not enabled.\n"
3223 " first occurrence: %B: ARM call to Thumb"),
3224 sym_sec->owner, input_bfd, name);
3227 /* We have an extra 2 bytes of reach because of
3228 the mode change (bit 24 (H) of the BLX encoding). */
3229 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3230 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3231 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3232 || (r_type == R_ARM_JUMP24)
3233 || (r_type == R_ARM_PLT32))
3235 stub_type = (info->shared | globals->pic_veneer)
3237 ? ((globals->use_blx)
3238 /* V5T and above. */
3239 ? arm_stub_long_branch_any_thumb_pic
3241 : arm_stub_long_branch_v4t_arm_thumb_pic)
3243 /* non-PIC stubs. */
3244 : ((globals->use_blx)
3245 /* V5T and above. */
3246 ? arm_stub_long_branch_any_any
3248 : arm_stub_long_branch_v4t_arm_thumb);
3254 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3255 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3257 stub_type = (info->shared | globals->pic_veneer)
3259 ? arm_stub_long_branch_any_arm_pic
3260 /* non-PIC stubs. */
3261 : arm_stub_long_branch_any_any;
3266 /* If a stub is needed, record the actual destination type. */
3267 if (stub_type != arm_stub_none)
3269 *actual_st_type = st_type;
3275 /* Build a name for an entry in the stub hash table. */
3278 elf32_arm_stub_name (const asection *input_section,
3279 const asection *sym_sec,
3280 const struct elf32_arm_link_hash_entry *hash,
3281 const Elf_Internal_Rela *rel,
3282 enum elf32_arm_stub_type stub_type)
3289 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3290 stub_name = (char *) bfd_malloc (len);
3291 if (stub_name != NULL)
3292 sprintf (stub_name, "%08x_%s+%x_%d",
3293 input_section->id & 0xffffffff,
3294 hash->root.root.root.string,
3295 (int) rel->r_addend & 0xffffffff,
3300 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3301 stub_name = (char *) bfd_malloc (len);
3302 if (stub_name != NULL)
3303 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3304 input_section->id & 0xffffffff,
3305 sym_sec->id & 0xffffffff,
3306 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3307 (int) rel->r_addend & 0xffffffff,
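/* Illustrative examples (hypothetical values): with input section id 0x2a
   and stub type 4 the formats above produce names such as

     "0000002a_printf+0_4"   branch to the global symbol printf
     "0000002a_1f:3+8_4"     branch to local symbol 3 in the section with
                             id 0x1f, addend 8

   These only need to be unique within the stub hash table; the friendlier
   per-stub local symbol name is kept separately in output_name. */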
3314 /* Look up an entry in the stub hash. Stub entries are cached because
3315 creating the stub name takes a bit of time. */
3317 static struct elf32_arm_stub_hash_entry *
3318 elf32_arm_get_stub_entry (const asection *input_section,
3319 const asection *sym_sec,
3320 struct elf_link_hash_entry *hash,
3321 const Elf_Internal_Rela *rel,
3322 struct elf32_arm_link_hash_table *htab,
3323 enum elf32_arm_stub_type stub_type)
3325 struct elf32_arm_stub_hash_entry *stub_entry;
3326 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3327 const asection *id_sec;
3329 if ((input_section->flags & SEC_CODE) == 0)
3332 /* If this input section is part of a group of sections sharing one
3333 stub section, then use the id of the first section in the group.
3334 Stub names need to include a section id, as there may well be
3335 more than one stub used to reach, say, printf, and we need to
3336 distinguish between them. */
3337 id_sec = htab->stub_group[input_section->id].link_sec;
3339 if (h != NULL && h->stub_cache != NULL
3340 && h->stub_cache->h == h
3341 && h->stub_cache->id_sec == id_sec
3342 && h->stub_cache->stub_type == stub_type)
3344 stub_entry = h->stub_cache;
3350 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3351 if (stub_name == NULL)
3354 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3355 stub_name, FALSE, FALSE);
3357 h->stub_cache = stub_entry;
3365 /* Find or create a stub section. Returns a pointer to the stub section, and
3366 the section to which the stub section will be attached (in *LINK_SEC_P).
3367 LINK_SEC_P may be NULL. */
3370 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3371 struct elf32_arm_link_hash_table *htab)
3376 link_sec = htab->stub_group[section->id].link_sec;
3377 stub_sec = htab->stub_group[section->id].stub_sec;
3378 if (stub_sec == NULL)
3380 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3381 if (stub_sec == NULL)
3387 namelen = strlen (link_sec->name);
3388 len = namelen + sizeof (STUB_SUFFIX);
3389 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3393 memcpy (s_name, link_sec->name, namelen);
3394 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3395 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3396 if (stub_sec == NULL)
3398 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3400 htab->stub_group[section->id].stub_sec = stub_sec;
3404 *link_sec_p = link_sec;
3409 /* Add a new stub entry to the stub hash. Not all fields of the new
3410 stub entry are initialised. */
3412 static struct elf32_arm_stub_hash_entry *
3413 elf32_arm_add_stub (const char *stub_name,
3415 struct elf32_arm_link_hash_table *htab)
3419 struct elf32_arm_stub_hash_entry *stub_entry;
3421 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3422 if (stub_sec == NULL)
3425 /* Enter this entry into the linker stub hash table. */
3426 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3428 if (stub_entry == NULL)
3430 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3436 stub_entry->stub_sec = stub_sec;
3437 stub_entry->stub_offset = 0;
3438 stub_entry->id_sec = link_sec;
3443 /* Store an Arm insn into an output section not processed by
3444 elf32_arm_write_section. */
3447 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3448 bfd * output_bfd, bfd_vma val, void * ptr)
3450 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3451 bfd_putl32 (val, ptr);
3453 bfd_putb32 (val, ptr);
3456 /* Store a 16-bit Thumb insn into an output section not processed by
3457 elf32_arm_write_section. */
3460 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3461 bfd * output_bfd, bfd_vma val, void * ptr)
3463 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3464 bfd_putl16 (val, ptr);
3466 bfd_putb16 (val, ptr);
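/* Behaviour sketch for the two helpers above, assuming byteswap_code is the
   0/1 "output a BE8 image" flag from the hash table:

     output BFD      byteswap_code   bytes written
     little-endian   0               little-endian  (bfd_putl*)
     big-endian      0               big-endian     (bfd_putb*)
     big-endian      1               little-endian  (bfd_putl*)  [BE8]

   i.e. in a BE8 image instructions are stored little-endian while data
   keeps the big-endian byte order. */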
3469 static bfd_reloc_status_type elf32_arm_final_link_relocate
3470 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3471 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3472 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3475 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3479 struct elf32_arm_stub_hash_entry *stub_entry;
3480 struct elf32_arm_link_hash_table *globals;
3481 struct bfd_link_info *info;
3489 const insn_sequence *template_sequence;
3491 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3492 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3495 /* Massage our args to the form they really have. */
3496 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3497 info = (struct bfd_link_info *) in_arg;
3499 globals = elf32_arm_hash_table (info);
3500 if (globals == NULL)
3503 stub_sec = stub_entry->stub_sec;
3505 if ((globals->fix_cortex_a8 < 0)
3506 != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm))
3507 /* We have to do the a8 fixes last, as they are less aligned than
3508 the other veneers. */
3511 /* Make a note of the offset within the stubs for this entry. */
3512 stub_entry->stub_offset = stub_sec->size;
3513 loc = stub_sec->contents + stub_entry->stub_offset;
3515 stub_bfd = stub_sec->owner;
3517 /* This is the address of the start of the stub. */
3518 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3519 + stub_entry->stub_offset;
3521 /* This is the address of the stub destination. */
3522 sym_value = (stub_entry->target_value
3523 + stub_entry->target_section->output_offset
3524 + stub_entry->target_section->output_section->vma);
3526 template_sequence = stub_entry->stub_template;
3527 template_size = stub_entry->stub_template_size;
3530 for (i = 0; i < template_size; i++)
3532 switch (template_sequence[i].type)
3536 bfd_vma data = (bfd_vma) template_sequence[i].data;
3537 if (template_sequence[i].reloc_addend != 0)
3539 /* We've borrowed the reloc_addend field to mean we should
3540 insert a condition code into this (Thumb-1 branch)
3541 instruction. See THUMB16_BCOND_INSN. */
3542 BFD_ASSERT ((data & 0xff00) == 0xd000);
3543 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
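/* Illustrative worked example: for an original "bne.w" the condition
   field (bits 25:22 of orig_insn) is 0x1, so the 0xd001 template
   becomes 0xd101 -- a "bne.n" that skips the fall-through b.w and
   lands on the branch to the original destination. */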
3545 bfd_put_16 (stub_bfd, data, loc + size);
3551 bfd_put_16 (stub_bfd,
3552 (template_sequence[i].data >> 16) & 0xffff,
3554 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
3556 if (template_sequence[i].r_type != R_ARM_NONE)
3558 stub_reloc_idx[nrelocs] = i;
3559 stub_reloc_offset[nrelocs++] = size;
3565 bfd_put_32 (stub_bfd, template_sequence[i].data,
3567 /* Handle cases where the target is encoded within the instruction. */
3569 if (template_sequence[i].r_type == R_ARM_JUMP24)
3571 stub_reloc_idx[nrelocs] = i;
3572 stub_reloc_offset[nrelocs++] = size;
3578 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3579 stub_reloc_idx[nrelocs] = i;
3580 stub_reloc_offset[nrelocs++] = size;
3590 stub_sec->size += size;
3592 /* Stub size has already been computed in arm_size_one_stub. Check consistency. */
3594 BFD_ASSERT (size == stub_entry->stub_size);
3596 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3597 if (stub_entry->st_type == STT_ARM_TFUNC)
3600 /* Assume there are between one and MAXRELOCS entries to relocate in each stub. */
3602 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3604 for (i = 0; i < nrelocs; i++)
3605 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3606 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3607 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3608 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3610 Elf_Internal_Rela rel;
3611 bfd_boolean unresolved_reloc;
3612 char *error_message;
3614 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3615 ? STT_ARM_TFUNC : 0;
3616 bfd_vma points_to = sym_value + stub_entry->target_addend;
3618 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3619 rel.r_info = ELF32_R_INFO (0,
3620 template_sequence[stub_reloc_idx[i]].r_type);
3621 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3623 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3624 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3625 template should refer back to the instruction after the original
3627 points_to = sym_value;
3629 /* There may be unintended consequences if this is not true. */
3630 BFD_ASSERT (stub_entry->h == NULL);
3632 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3633 properly. We should probably use this function unconditionally,
3634 rather than only for certain relocations listed in the enclosing
3635 conditional, for the sake of consistency. */
3636 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3637 (template_sequence[stub_reloc_idx[i]].r_type),
3638 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3639 points_to, info, stub_entry->target_section, "", sym_flags,
3640 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3645 Elf_Internal_Rela rel;
3646 bfd_boolean unresolved_reloc;
3647 char *error_message;
3648 bfd_vma points_to = sym_value + stub_entry->target_addend
3649 + template_sequence[stub_reloc_idx[i]].reloc_addend;
3651 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3652 rel.r_info = ELF32_R_INFO (0,
3653 template_sequence[stub_reloc_idx[i]].r_type);
3656 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3657 (template_sequence[stub_reloc_idx[i]].r_type),
3658 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3659 points_to, info, stub_entry->target_section, "", stub_entry->st_type,
3660 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3668 /* Calculate the template, template size and instruction size for a stub.
3669 Return value is the instruction size. */
3672 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3673 const insn_sequence **stub_template,
3674 int *stub_template_size)
3676 const insn_sequence *template_sequence = NULL;
3677 int template_size = 0, i;
3680 template_sequence = stub_definitions[stub_type].template_sequence;
3681 template_size = stub_definitions[stub_type].template_size;
3684 for (i = 0; i < template_size; i++)
3686 switch (template_sequence[i].type)
3705 *stub_template = template_sequence;
3707 if (stub_template_size)
3708 *stub_template_size = template_size;
3713 /* As above, but don't actually build the stub. Just bump offset so
3714 we know stub section sizes. */
3717 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3720 struct elf32_arm_stub_hash_entry *stub_entry;
3721 struct elf32_arm_link_hash_table *htab;
3722 const insn_sequence *template_sequence;
3723 int template_size, size;
3725 /* Massage our args to the form they really have. */
3726 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3727 htab = (struct elf32_arm_link_hash_table *) in_arg;
3729 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3730 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3732 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3735 stub_entry->stub_size = size;
3736 stub_entry->stub_template = template_sequence;
3737 stub_entry->stub_template_size = template_size;
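/* The rounding below pads each stub to a multiple of 8 bytes; e.g. a
   10-byte Thumb sequence reserves 16 bytes in the stub section. */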
3739 size = (size + 7) & ~7;
3740 stub_entry->stub_sec->size += size;
3745 /* External entry points for sizing and building linker stubs. */
3747 /* Set up various things so that we can make a list of input sections
3748 for each output section included in the link. Returns -1 on error,
3749 0 when no stubs will be needed, and 1 on success. */
3752 elf32_arm_setup_section_lists (bfd *output_bfd,
3753 struct bfd_link_info *info)
3756 unsigned int bfd_count;
3757 int top_id, top_index;
3759 asection **input_list, **list;
3761 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3765 if (! is_elf_hash_table (htab))
3768 /* Count the number of input BFDs and find the top input section id. */
3769 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3771 input_bfd = input_bfd->link_next)
3774 for (section = input_bfd->sections;
3776 section = section->next)
3778 if (top_id < section->id)
3779 top_id = section->id;
3782 htab->bfd_count = bfd_count;
3784 amt = sizeof (struct map_stub) * (top_id + 1);
3785 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3786 if (htab->stub_group == NULL)
3788 htab->top_id = top_id;
3790 /* We can't use output_bfd->section_count here to find the top output
3791 section index as some sections may have been removed, and
3792 _bfd_strip_section_from_output doesn't renumber the indices. */
3793 for (section = output_bfd->sections, top_index = 0;
3795 section = section->next)
3797 if (top_index < section->index)
3798 top_index = section->index;
3801 htab->top_index = top_index;
3802 amt = sizeof (asection *) * (top_index + 1);
3803 input_list = (asection **) bfd_malloc (amt);
3804 htab->input_list = input_list;
3805 if (input_list == NULL)
3808 /* For sections we aren't interested in, mark their entries with a
3809 value we can check later. */
3810 list = input_list + top_index;
3812 *list = bfd_abs_section_ptr;
3813 while (list-- != input_list);
3815 for (section = output_bfd->sections;
3817 section = section->next)
3819 if ((section->flags & SEC_CODE) != 0)
3820 input_list[section->index] = NULL;
3826 /* The linker repeatedly calls this function for each input section,
3827 in the order that input sections are linked into output sections.
3828 Build lists of input sections to determine groupings between which
3829 we may insert linker stubs. */
3832 elf32_arm_next_input_section (struct bfd_link_info *info,
3835 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3840 if (isec->output_section->index <= htab->top_index)
3842 asection **list = htab->input_list + isec->output_section->index;
3844 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3846 /* Steal the link_sec pointer for our list. */
3847 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3848 /* This happens to make the list in reverse order,
3849 which we reverse later. */
3850 PREV_SEC (isec) = *list;
3856 /* See whether we can group stub sections together. Grouping stub
3857 sections may result in fewer stubs. More importantly, we need to
3858 put all .init* and .fini* stubs at the end of the .init or
3859 .fini output sections respectively, because glibc splits the
3860 _init and _fini functions into multiple parts. Putting a stub in
3861 the middle of a function is not a good idea. */
3864 group_sections (struct elf32_arm_link_hash_table *htab,
3865 bfd_size_type stub_group_size,
3866 bfd_boolean stubs_always_after_branch)
3868 asection **list = htab->input_list;
3872 asection *tail = *list;
3875 if (tail == bfd_abs_section_ptr)
3878 /* Reverse the list: we must avoid placing stubs at the
3879 beginning of the section because the beginning of the text
3880 section may be required for an interrupt vector in bare metal code. */
3882 #define NEXT_SEC PREV_SEC
3884 while (tail != NULL)
3886 /* Pop from tail. */
3887 asection *item = tail;
3888 tail = PREV_SEC (item);
3891 NEXT_SEC (item) = head;
3895 while (head != NULL)
3899 bfd_vma stub_group_start = head->output_offset;
3900 bfd_vma end_of_next;
3903 while (NEXT_SEC (curr) != NULL)
3905 next = NEXT_SEC (curr);
3906 end_of_next = next->output_offset + next->size;
3907 if (end_of_next - stub_group_start >= stub_group_size)
3908 /* End of NEXT is too far from start, so stop. */
3910 /* Add NEXT to the group. */
3914 /* OK, the size from the start to the start of CURR is less
3915 than stub_group_size and thus can be handled by one stub
3916 section. (Or the head section is itself larger than
3917 stub_group_size, in which case we may be toast.)
3918 We should really be keeping track of the total size of
3919 stubs added here, as stubs contribute to the final output section size. */
3923 next = NEXT_SEC (head);
3924 /* Set up this stub group. */
3925 htab->stub_group[head->id].link_sec = curr;
3927 while (head != curr && (head = next) != NULL);
3929 /* But wait, there's more! Input sections up to stub_group_size
3930 bytes after the stub section can be handled by it too. */
3931 if (!stubs_always_after_branch)
3933 stub_group_start = curr->output_offset + curr->size;
3935 while (next != NULL)
3937 end_of_next = next->output_offset + next->size;
3938 if (end_of_next - stub_group_start >= stub_group_size)
3939 /* End of NEXT is too far from stubs, so stop. */
3941 /* Add NEXT to the stub group. */
3943 next = NEXT_SEC (head);
3944 htab->stub_group[head->id].link_sec = curr;
3950 while (list++ != htab->input_list + htab->top_index);
3952 free (htab->input_list);
3957 /* Comparison function for sorting/searching relocations relating to the Cortex-A8 erratum. */
3961 a8_reloc_compare (const void *a, const void *b)
3963 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3964 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3966 if (ra->from < rb->from)
3968 else if (ra->from > rb->from)
3974 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3975 const char *, char **);
3977 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3978 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3979 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false otherwise. */
3983 cortex_a8_erratum_scan (bfd *input_bfd,
3984 struct bfd_link_info *info,
3985 struct a8_erratum_fix **a8_fixes_p,
3986 unsigned int *num_a8_fixes_p,
3987 unsigned int *a8_fix_table_size_p,
3988 struct a8_erratum_reloc *a8_relocs,
3989 unsigned int num_a8_relocs,
3990 unsigned prev_num_a8_fixes,
3991 bfd_boolean *stub_changed_p)
3994 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3995 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3996 unsigned int num_a8_fixes = *num_a8_fixes_p;
3997 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4002 for (section = input_bfd->sections;
4004 section = section->next)
4006 bfd_byte *contents = NULL;
4007 struct _arm_elf_section_data *sec_data;
4011 if (elf_section_type (section) != SHT_PROGBITS
4012 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4013 || (section->flags & SEC_EXCLUDE) != 0
4014 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
4015 || (section->output_section == bfd_abs_section_ptr))
4018 base_vma = section->output_section->vma + section->output_offset;
4020 if (elf_section_data (section)->this_hdr.contents != NULL)
4021 contents = elf_section_data (section)->this_hdr.contents;
4022 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4025 sec_data = elf32_arm_section_data (section);
4027 for (span = 0; span < sec_data->mapcount; span++)
4029 unsigned int span_start = sec_data->map[span].vma;
4030 unsigned int span_end = (span == sec_data->mapcount - 1)
4031 ? section->size : sec_data->map[span + 1].vma;
4033 char span_type = sec_data->map[span].type;
4034 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4036 if (span_type != 't')
4039 /* Span is entirely within a single 4KB region: skip scanning. */
4040 if (((base_vma + span_start) & ~0xfff)
4041 == ((base_vma + span_end) & ~0xfff))
4044 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4046 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4047 * The branch target is in the same 4KB region as the
4048 first half of the branch.
4049 * The instruction before the branch is a 32-bit
4050 length non-branch instruction. */
4051 for (i = span_start; i < span_end;)
4053 unsigned int insn = bfd_getl16 (&contents[i]);
4054 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4055 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4057 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4062 /* Load the rest of the insn (in manual-friendly order). */
4063 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4065 /* Encoding T4: B<c>.W. */
4066 is_b = (insn & 0xf800d000) == 0xf0009000;
4067 /* Encoding T1: BL<c>.W. */
4068 is_bl = (insn & 0xf800d000) == 0xf000d000;
4069 /* Encoding T2: BLX<c>.W. */
4070 is_blx = (insn & 0xf800d000) == 0xf000c000;
4071 /* Encoding T3: B<c>.W (not permitted in IT block). */
4072 is_bcc = (insn & 0xf800d000) == 0xf0008000
4073 && (insn & 0x07f00000) != 0x03800000;
4076 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
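/* Note for clarity: the ((base_vma + i) & 0xfff) == 0xffe test below
   matches a 32-bit instruction whose first halfword occupies the last two
   bytes of a 4KB page; e.g. an instruction starting at 0x8ffe has its
   second halfword at 0x9000, so the branch straddles two pages. */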
4078 if (((base_vma + i) & 0xfff) == 0xffe
4082 && ! last_was_branch)
4084 bfd_signed_vma offset = 0;
4085 bfd_boolean force_target_arm = FALSE;
4086 bfd_boolean force_target_thumb = FALSE;
4088 enum elf32_arm_stub_type stub_type = arm_stub_none;
4089 struct a8_erratum_reloc key, *found;
4091 key.from = base_vma + i;
4092 found = (struct a8_erratum_reloc *)
4093 bsearch (&key, a8_relocs, num_a8_relocs,
4094 sizeof (struct a8_erratum_reloc),
4099 char *error_message = NULL;
4100 struct elf_link_hash_entry *entry;
4102 /* We don't care about the error returned from this
4103 function, only whether there is glue or not. */
4104 entry = find_thumb_glue (info, found->sym_name,
4108 found->non_a8_stub = TRUE;
4110 if (found->r_type == R_ARM_THM_CALL
4111 && found->st_type != STT_ARM_TFUNC)
4112 force_target_arm = TRUE;
4113 else if (found->r_type == R_ARM_THM_CALL
4114 && found->st_type == STT_ARM_TFUNC)
4115 force_target_thumb = TRUE;
4118 /* Check if we have an offending branch instruction. */
4120 if (found && found->non_a8_stub)
4121 /* We've already made a stub for this instruction, e.g.
4122 it's a long branch or a Thumb->ARM stub. Assume that
4123 stub will suffice to work around the A8 erratum (see
4124 setting of always_after_branch above). */
4128 offset = (insn & 0x7ff) << 1;
4129 offset |= (insn & 0x3f0000) >> 4;
4130 offset |= (insn & 0x2000) ? 0x40000 : 0;
4131 offset |= (insn & 0x800) ? 0x80000 : 0;
4132 offset |= (insn & 0x4000000) ? 0x100000 : 0;
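/* Note for clarity: bit 20 (0x100000) is the sign bit of the reconstructed
   21-bit Bcc.W offset; the test below sign-extends it to a full
   bfd_signed_vma, e.g. an encoded value of 0x1ffffe becomes -2. */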
4133 if (offset & 0x100000)
4134 offset |= ~ ((bfd_signed_vma) 0xfffff);
4135 stub_type = arm_stub_a8_veneer_b_cond;
4137 else if (is_b || is_bl || is_blx)
4139 int s = (insn & 0x4000000) != 0;
4140 int j1 = (insn & 0x2000) != 0;
4141 int j2 = (insn & 0x800) != 0;
4145 offset = (insn & 0x7ff) << 1;
4146 offset |= (insn & 0x3ff0000) >> 4;
4150 if (offset & 0x1000000)
4151 offset |= ~ ((bfd_signed_vma) 0xffffff);
4154 offset &= ~ ((bfd_signed_vma) 3);
4156 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4157 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4160 if (stub_type != arm_stub_none)
4162 bfd_vma pc_for_insn = base_vma + i + 4;
4164 /* The original instruction is a BL, but the target is
4165 an ARM instruction. If we were not making a stub,
4166 the BL would have been converted to a BLX. Use the
4167 BLX stub instead in that case. */
4168 if (htab->use_blx && force_target_arm
4169 && stub_type == arm_stub_a8_veneer_bl)
4171 stub_type = arm_stub_a8_veneer_blx;
4175 /* Conversely, if the original instruction was
4176 BLX but the target is Thumb mode, use the BL stub instead. */
4178 else if (force_target_thumb
4179 && stub_type == arm_stub_a8_veneer_blx)
4181 stub_type = arm_stub_a8_veneer_bl;
4187 pc_for_insn &= ~ ((bfd_vma) 3);
4189 /* If we found a relocation, use the proper destination,
4190 not the offset in the (unrelocated) instruction.
4191 Note this is always done if we switched the stub type above. */
4195 (bfd_signed_vma) (found->destination - pc_for_insn);
4197 target = pc_for_insn + offset;
4199 /* The BLX stub is ARM-mode code. Adjust the offset to
4200 take the different PC value (+8 instead of +4) into account. */
4202 if (stub_type == arm_stub_a8_veneer_blx)
4205 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4207 char *stub_name = NULL;
4209 if (num_a8_fixes == a8_fix_table_size)
4211 a8_fix_table_size *= 2;
4212 a8_fixes = (struct a8_erratum_fix *)
4213 bfd_realloc (a8_fixes,
4214 sizeof (struct a8_erratum_fix)
4215 * a8_fix_table_size);
4218 if (num_a8_fixes < prev_num_a8_fixes)
4220 /* If we're doing a subsequent scan,
4221 check if we've found the same fix as
4222 before, and try to reuse the stub name. */
4224 stub_name = a8_fixes[num_a8_fixes].stub_name;
4225 if ((a8_fixes[num_a8_fixes].section != section)
4226 || (a8_fixes[num_a8_fixes].offset != i))
4230 *stub_changed_p = TRUE;
4236 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4237 if (stub_name != NULL)
4238 sprintf (stub_name, "%x:%x", section->id, i);
4241 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4242 a8_fixes[num_a8_fixes].section = section;
4243 a8_fixes[num_a8_fixes].offset = i;
4244 a8_fixes[num_a8_fixes].addend = offset;
4245 a8_fixes[num_a8_fixes].orig_insn = insn;
4246 a8_fixes[num_a8_fixes].stub_name = stub_name;
4247 a8_fixes[num_a8_fixes].stub_type = stub_type;
4248 a8_fixes[num_a8_fixes].st_type =
4249 is_blx ? STT_FUNC : STT_ARM_TFUNC;
4256 i += insn_32bit ? 4 : 2;
4257 last_was_32bit = insn_32bit;
4258 last_was_branch = is_32bit_branch;
4262 if (elf_section_data (section)->this_hdr.contents == NULL)
4266 *a8_fixes_p = a8_fixes;
4267 *num_a8_fixes_p = num_a8_fixes;
4268 *a8_fix_table_size_p = a8_fix_table_size;
4273 /* Determine and set the size of the stub section for a final link.
4275 The basic idea here is to examine all the relocations looking for
4276 PC-relative calls to a target that is unreachable with a "bl" instruction. */
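/* Broadly, the code below proceeds as follows: the input sections are
   first grouped (group_sections), then every relocation of every input
   section is examined and any required stubs (and Cortex-A8 erratum
   fixes) are recorded, the stub sections are re-sized and the linker is
   asked to lay sections out again; the scan repeats until a pass no
   longer changes any stub.  */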
4280 elf32_arm_size_stubs (bfd *output_bfd,
4282 struct bfd_link_info *info,
4283 bfd_signed_vma group_size,
4284 asection * (*add_stub_section) (const char *, asection *),
4285 void (*layout_sections_again) (void))
4287 bfd_size_type stub_group_size;
4288 bfd_boolean stubs_always_after_branch;
4289 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4290 struct a8_erratum_fix *a8_fixes = NULL;
4291 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4292 struct a8_erratum_reloc *a8_relocs = NULL;
4293 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4298 if (htab->fix_cortex_a8)
4300 a8_fixes = (struct a8_erratum_fix *)
4301 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4302 a8_relocs = (struct a8_erratum_reloc *)
4303 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4306 /* Propagate mach to stub bfd, because it may not have been
4307 finalized when we created stub_bfd. */
4308 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4309 bfd_get_mach (output_bfd));
4311 /* Stash our params away. */
4312 htab->stub_bfd = stub_bfd;
4313 htab->add_stub_section = add_stub_section;
4314 htab->layout_sections_again = layout_sections_again;
4315 stubs_always_after_branch = group_size < 0;
4317 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4318 as the first half of a 32-bit branch straddling two 4K pages. This is a
4319 crude way of enforcing that. */
4320 if (htab->fix_cortex_a8)
4321 stubs_always_after_branch = 1;
4324 stub_group_size = -group_size;
4326 stub_group_size = group_size;
4328 if (stub_group_size == 1)
4330 /* Default values. */
4331 /* The Thumb branch range of +-4MB has to be used as the default
4332 maximum size (a given section can contain both ARM and Thumb
4333 code, so the worst case has to be taken into account).
4335 This value is 24K less than that, which allows for 2025
4336 12-byte stubs. If we exceed that, then we will fail to link.
4337 The user will have to relink with an explicit group size option. */
4339 stub_group_size = 4170000;
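/* For reference: 4MB is 4194304 bytes, and 4194304 - 4170000 = 24304,
   which is the "24K less" mentioned above; 24304 / 12 gives the 2025
   twelve-byte stubs quoted in the comment.  */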
4342 group_sections (htab, stub_group_size, stubs_always_after_branch);
4344 /* If we're applying the cortex A8 fix, we need to determine the
4345 program header size now, because we cannot change it later --
4346 that could alter section placements. Notice the A8 erratum fix
4347 ends up requiring the section addresses to remain unchanged
4348 modulo the page size. That's something we cannot represent
4349 inside BFD, and we don't want to force the section alignment to
4350 be the page size. */
4351 if (htab->fix_cortex_a8)
4352 (*htab->layout_sections_again) ();
4357 unsigned int bfd_indx;
4359 bfd_boolean stub_changed = FALSE;
4360 unsigned prev_num_a8_fixes = num_a8_fixes;
4363 for (input_bfd = info->input_bfds, bfd_indx = 0;
4365 input_bfd = input_bfd->link_next, bfd_indx++)
4367 Elf_Internal_Shdr *symtab_hdr;
4369 Elf_Internal_Sym *local_syms = NULL;
4373 /* We'll need the symbol table in a second. */
4374 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4375 if (symtab_hdr->sh_info == 0)
4378 /* Walk over each section attached to the input bfd. */
4379 for (section = input_bfd->sections;
4381 section = section->next)
4383 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4385 /* If there aren't any relocs, then there's nothing more to do. */
4387 if ((section->flags & SEC_RELOC) == 0
4388 || section->reloc_count == 0
4389 || (section->flags & SEC_CODE) == 0)
4392 /* If this section is a link-once section that will be
4393 discarded, then don't create any stubs. */
4394 if (section->output_section == NULL
4395 || section->output_section->owner != output_bfd)
4398 /* Get the relocs. */
4400 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4401 NULL, info->keep_memory);
4402 if (internal_relocs == NULL)
4403 goto error_ret_free_local;
4405 /* Now examine each relocation. */
4406 irela = internal_relocs;
4407 irelaend = irela + section->reloc_count;
4408 for (; irela < irelaend; irela++)
4410 unsigned int r_type, r_indx;
4411 enum elf32_arm_stub_type stub_type;
4412 struct elf32_arm_stub_hash_entry *stub_entry;
4415 bfd_vma destination;
4416 struct elf32_arm_link_hash_entry *hash;
4417 const char *sym_name;
4419 const asection *id_sec;
4421 bfd_boolean created_stub = FALSE;
4423 r_type = ELF32_R_TYPE (irela->r_info);
4424 r_indx = ELF32_R_SYM (irela->r_info);
4426 if (r_type >= (unsigned int) R_ARM_max)
4428 bfd_set_error (bfd_error_bad_value);
4429 error_ret_free_internal:
4430 if (elf_section_data (section)->relocs == NULL)
4431 free (internal_relocs);
4432 goto error_ret_free_local;
4435 /* Only look for stubs on branch instructions. */
4436 if ((r_type != (unsigned int) R_ARM_CALL)
4437 && (r_type != (unsigned int) R_ARM_THM_CALL)
4438 && (r_type != (unsigned int) R_ARM_JUMP24)
4439 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4440 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4441 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4442 && (r_type != (unsigned int) R_ARM_PLT32))
4445 /* Now determine the call target, its name, value, section and type. */
4452 if (r_indx < symtab_hdr->sh_info)
4454 /* It's a local symbol. */
4455 Elf_Internal_Sym *sym;
4456 Elf_Internal_Shdr *hdr;
4458 if (local_syms == NULL)
4461 = (Elf_Internal_Sym *) symtab_hdr->contents;
4462 if (local_syms == NULL)
4464 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4465 symtab_hdr->sh_info, 0,
4467 if (local_syms == NULL)
4468 goto error_ret_free_internal;
4471 sym = local_syms + r_indx;
4472 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4473 sym_sec = hdr->bfd_section;
4475 /* This is an undefined symbol. It can never be resolved. */
4479 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4480 sym_value = sym->st_value;
4481 destination = (sym_value + irela->r_addend
4482 + sym_sec->output_offset
4483 + sym_sec->output_section->vma);
4484 st_type = ELF_ST_TYPE (sym->st_info);
4486 = bfd_elf_string_from_elf_section (input_bfd,
4487 symtab_hdr->sh_link,
4492 /* It's an external symbol. */
4495 e_indx = r_indx - symtab_hdr->sh_info;
4496 hash = ((struct elf32_arm_link_hash_entry *)
4497 elf_sym_hashes (input_bfd)[e_indx]);
4499 while (hash->root.root.type == bfd_link_hash_indirect
4500 || hash->root.root.type == bfd_link_hash_warning)
4501 hash = ((struct elf32_arm_link_hash_entry *)
4502 hash->root.root.u.i.link);
4504 if (hash->root.root.type == bfd_link_hash_defined
4505 || hash->root.root.type == bfd_link_hash_defweak)
4507 sym_sec = hash->root.root.u.def.section;
4508 sym_value = hash->root.root.u.def.value;
4510 struct elf32_arm_link_hash_table *globals =
4511 elf32_arm_hash_table (info);
4513 /* For a destination in a shared library,
4514 use the PLT stub as target address to
4515 decide whether a branch stub is needed. */
4518 && globals->splt != NULL
4520 && hash->root.plt.offset != (bfd_vma) -1)
4522 sym_sec = globals->splt;
4523 sym_value = hash->root.plt.offset;
4524 if (sym_sec->output_section != NULL)
4525 destination = (sym_value
4526 + sym_sec->output_offset
4527 + sym_sec->output_section->vma);
4529 else if (sym_sec->output_section != NULL)
4530 destination = (sym_value + irela->r_addend
4531 + sym_sec->output_offset
4532 + sym_sec->output_section->vma);
4534 else if ((hash->root.root.type == bfd_link_hash_undefined)
4535 || (hash->root.root.type == bfd_link_hash_undefweak))
4537 /* For a shared library, use the PLT stub as
4538 target address to decide whether a long
4539 branch stub is needed.
4540 For absolute code, they cannot be handled. */
4541 struct elf32_arm_link_hash_table *globals =
4542 elf32_arm_hash_table (info);
4545 && globals->splt != NULL
4547 && hash->root.plt.offset != (bfd_vma) -1)
4549 sym_sec = globals->splt;
4550 sym_value = hash->root.plt.offset;
4551 if (sym_sec->output_section != NULL)
4552 destination = (sym_value
4553 + sym_sec->output_offset
4554 + sym_sec->output_section->vma);
4561 bfd_set_error (bfd_error_bad_value);
4562 goto error_ret_free_internal;
4564 st_type = ELF_ST_TYPE (hash->root.type);
4565 sym_name = hash->root.root.root.string;
4570 /* Determine what (if any) linker stub is needed. */
4571 stub_type = arm_type_of_stub (info, section, irela,
4573 destination, sym_sec,
4574 input_bfd, sym_name);
4575 if (stub_type == arm_stub_none)
4578 /* Support for grouping stub sections. */
4579 id_sec = htab->stub_group[section->id].link_sec;
4581 /* Get the name of this stub. */
4582 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4585 goto error_ret_free_internal;
4587 /* We've either created a stub for this reloc already,
4588 or we are about to. */
4589 created_stub = TRUE;
4591 stub_entry = arm_stub_hash_lookup
4592 (&htab->stub_hash_table, stub_name,
4594 if (stub_entry != NULL)
4596 /* The proper stub has already been created. */
4598 stub_entry->target_value = sym_value;
4602 stub_entry = elf32_arm_add_stub (stub_name, section,
4604 if (stub_entry == NULL)
4607 goto error_ret_free_internal;
4610 stub_entry->target_value = sym_value;
4611 stub_entry->target_section = sym_sec;
4612 stub_entry->stub_type = stub_type;
4613 stub_entry->h = hash;
4614 stub_entry->st_type = st_type;
4616 if (sym_name == NULL)
4617 sym_name = "unnamed";
4618 stub_entry->output_name = (char *)
4619 bfd_alloc (htab->stub_bfd,
4620 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4621 + strlen (sym_name));
4622 if (stub_entry->output_name == NULL)
4625 goto error_ret_free_internal;
4628 /* For historical reasons, use the existing names for
4629 ARM-to-Thumb and Thumb-to-ARM stubs. */
4630 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4631 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4632 && st_type != STT_ARM_TFUNC)
4633 sprintf (stub_entry->output_name,
4634 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4635 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4636 || (r_type == (unsigned int) R_ARM_JUMP24))
4637 && st_type == STT_ARM_TFUNC)
4638 sprintf (stub_entry->output_name,
4639 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4641 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4644 stub_changed = TRUE;
4648 /* Look for relocations which might trigger Cortex-A8 erratum fixes. */
4650 if (htab->fix_cortex_a8
4651 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4652 || r_type == (unsigned int) R_ARM_THM_JUMP19
4653 || r_type == (unsigned int) R_ARM_THM_CALL
4654 || r_type == (unsigned int) R_ARM_THM_XPC22))
4656 bfd_vma from = section->output_section->vma
4657 + section->output_offset
4660 if ((from & 0xfff) == 0xffe)
4662 /* Found a candidate. Note we haven't checked the
4663 destination is within 4K here: if we do so (and
4664 don't create an entry in a8_relocs) we can't tell
4665 that a branch should have been relocated when scanning later. */
4667 if (num_a8_relocs == a8_reloc_table_size)
4669 a8_reloc_table_size *= 2;
4670 a8_relocs = (struct a8_erratum_reloc *)
4671 bfd_realloc (a8_relocs,
4672 sizeof (struct a8_erratum_reloc)
4673 * a8_reloc_table_size);
4676 a8_relocs[num_a8_relocs].from = from;
4677 a8_relocs[num_a8_relocs].destination = destination;
4678 a8_relocs[num_a8_relocs].r_type = r_type;
4679 a8_relocs[num_a8_relocs].st_type = st_type;
4680 a8_relocs[num_a8_relocs].sym_name = sym_name;
4681 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4688 /* We're done with the internal relocs, free them. */
4689 if (elf_section_data (section)->relocs == NULL)
4690 free (internal_relocs);
4693 if (htab->fix_cortex_a8)
4695 /* Sort relocs which might apply to Cortex-A8 erratum. */
4696 qsort (a8_relocs, num_a8_relocs,
4697 sizeof (struct a8_erratum_reloc),
4700 /* Scan for branches which might trigger Cortex-A8 erratum. */
4701 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4702 &num_a8_fixes, &a8_fix_table_size,
4703 a8_relocs, num_a8_relocs,
4704 prev_num_a8_fixes, &stub_changed)
4706 goto error_ret_free_local;
4710 if (prev_num_a8_fixes != num_a8_fixes)
4711 stub_changed = TRUE;
4716 /* OK, we've added some stubs. Find out the new size of the stub sections. */
4718 for (stub_sec = htab->stub_bfd->sections;
4720 stub_sec = stub_sec->next)
4722 /* Ignore non-stub sections. */
4723 if (!strstr (stub_sec->name, STUB_SUFFIX))
4729 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4731 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4732 if (htab->fix_cortex_a8)
4733 for (i = 0; i < num_a8_fixes; i++)
4735 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4736 a8_fixes[i].section, htab);
4738 if (stub_sec == NULL)
4739 goto error_ret_free_local;
4742 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4747 /* Ask the linker to do its stuff. */
4748 (*htab->layout_sections_again) ();
4751 /* Add stubs for Cortex-A8 erratum fixes now. */
4752 if (htab->fix_cortex_a8)
4754 for (i = 0; i < num_a8_fixes; i++)
4756 struct elf32_arm_stub_hash_entry *stub_entry;
4757 char *stub_name = a8_fixes[i].stub_name;
4758 asection *section = a8_fixes[i].section;
4759 unsigned int section_id = a8_fixes[i].section->id;
4760 asection *link_sec = htab->stub_group[section_id].link_sec;
4761 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4762 const insn_sequence *template_sequence;
4763 int template_size, size = 0;
4765 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4767 if (stub_entry == NULL)
4769 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4775 stub_entry->stub_sec = stub_sec;
4776 stub_entry->stub_offset = 0;
4777 stub_entry->id_sec = link_sec;
4778 stub_entry->stub_type = a8_fixes[i].stub_type;
4779 stub_entry->target_section = a8_fixes[i].section;
4780 stub_entry->target_value = a8_fixes[i].offset;
4781 stub_entry->target_addend = a8_fixes[i].addend;
4782 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4783 stub_entry->st_type = a8_fixes[i].st_type;
4785 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4789 stub_entry->stub_size = size;
4790 stub_entry->stub_template = template_sequence;
4791 stub_entry->stub_template_size = template_size;
4794 /* Stash the Cortex-A8 erratum fix array for use later in
4795 elf32_arm_write_section(). */
4796 htab->a8_erratum_fixes = a8_fixes;
4797 htab->num_a8_erratum_fixes = num_a8_fixes;
4801 htab->a8_erratum_fixes = NULL;
4802 htab->num_a8_erratum_fixes = 0;
4806 error_ret_free_local:
4810 /* Build all the stubs associated with the current output file. The
4811 stubs are kept in a hash table attached to the main linker hash
4812 table. We also set up the .plt entries for statically linked PIC
4813 functions here. This function is called via arm_elf_finish in the linker. */
4817 elf32_arm_build_stubs (struct bfd_link_info *info)
4820 struct bfd_hash_table *table;
4821 struct elf32_arm_link_hash_table *htab;
4823 htab = elf32_arm_hash_table (info);
4827 for (stub_sec = htab->stub_bfd->sections;
4829 stub_sec = stub_sec->next)
4833 /* Ignore non-stub sections. */
4834 if (!strstr (stub_sec->name, STUB_SUFFIX))
4837 /* Allocate memory to hold the linker stubs. */
4838 size = stub_sec->size;
4839 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4840 if (stub_sec->contents == NULL && size != 0)
4845 /* Build the stubs as directed by the stub hash table. */
4846 table = &htab->stub_hash_table;
4847 bfd_hash_traverse (table, arm_build_one_stub, info);
4848 if (htab->fix_cortex_a8)
4850 /* Place the cortex a8 stubs last. */
4851 htab->fix_cortex_a8 = -1;
4852 bfd_hash_traverse (table, arm_build_one_stub, info);
4858 /* Locate the Thumb encoded calling stub for NAME. */
4860 static struct elf_link_hash_entry *
4861 find_thumb_glue (struct bfd_link_info *link_info,
4863 char **error_message)
4866 struct elf_link_hash_entry *hash;
4867 struct elf32_arm_link_hash_table *hash_table;
4869 /* We need a pointer to the armelf specific hash table. */
4870 hash_table = elf32_arm_hash_table (link_info);
4871 if (hash_table == NULL)
4874 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4875 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4877 BFD_ASSERT (tmp_name);
4879 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4881 hash = elf_link_hash_lookup
4882 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4885 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4886 tmp_name, name) == -1)
4887 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4894 /* Locate the ARM encoded calling stub for NAME. */
4896 static struct elf_link_hash_entry *
4897 find_arm_glue (struct bfd_link_info *link_info,
4899 char **error_message)
4902 struct elf_link_hash_entry *myh;
4903 struct elf32_arm_link_hash_table *hash_table;
4905 /* We need a pointer to the elfarm specific hash table. */
4906 hash_table = elf32_arm_hash_table (link_info);
4907 if (hash_table == NULL)
4910 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4911 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4913 BFD_ASSERT (tmp_name);
4915 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4917 myh = elf_link_hash_lookup
4918 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4921 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4922 tmp_name, name) == -1)
4923 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4930 /* ARM->Thumb glue (static images):
4934 ldr r12, __func_addr
4937 .word func @ behave as if you saw an ARM_32 reloc.
4944 .word func @ behave as if you saw an ARM_32 reloc.
4946 (relocatable images)
4949 ldr r12, __func_offset
4955 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4956 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4957 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4958 static const insn32 a2t3_func_addr_insn = 0x00000001;
4960 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4961 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4962 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4964 #define ARM2THUMB_PIC_GLUE_SIZE 16
4965 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4966 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4967 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
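/* Decoding the glue opcodes above: 0xe59fc000 is "ldr ip, [pc]" (the PC
   reads as the instruction address + 8, so it fetches the .word that
   follows the BX), 0xe12fff1c is "bx ip", and the 0x00000001 words are
   placeholders whose set low bit makes the BX enter Thumb state once
   the real address is OR'd in (see elf32_arm_create_thumb_stub).
   0xe51ff004 is "ldr pc, [pc, #-4]", loading the following word
   straight into the PC on ARMv5T.  In the PIC variant, 0xe59fc004 is
   "ldr ip, [pc, #4]" and 0xe08cc00f is "add ip, ip, pc", turning the
   stored offset back into an absolute address before the BX.  */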
4969 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4973 __func_from_thumb: __func_from_thumb:
4975 nop ldr r6, __func_addr
4985 #define THUMB2ARM_GLUE_SIZE 8
4986 static const insn16 t2a1_bx_pc_insn = 0x4778;
4987 static const insn16 t2a2_noop_insn = 0x46c0;
4988 static const insn32 t2a3_b_insn = 0xea000000;
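/* Likewise for the Thumb->ARM glue: 0x4778 is the Thumb "bx pc"
   (branching to the ARM code four bytes ahead and switching state),
   0x46c0 is "mov r8, r8", the canonical Thumb nop used as alignment
   padding, and 0xea000000 is an ARM "b" with a zero offset that is
   patched in elf32_thumb_to_arm_stub.  */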
4990 #define VFP11_ERRATUM_VENEER_SIZE 8
4992 #define ARM_BX_VENEER_SIZE 12
4993 static const insn32 armbx1_tst_insn = 0xe3100001;
4994 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4995 static const insn32 armbx3_bx_insn = 0xe12fff10;
4997 #ifndef ELFARM_NABI_C_INCLUDED
4999 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5002 bfd_byte * contents;
5006 /* Do not include empty glue sections in the output. */
5009 s = bfd_get_section_by_name (abfd, name);
5011 s->flags |= SEC_EXCLUDE;
5016 BFD_ASSERT (abfd != NULL);
5018 s = bfd_get_section_by_name (abfd, name);
5019 BFD_ASSERT (s != NULL);
5021 contents = (bfd_byte *) bfd_alloc (abfd, size);
5023 BFD_ASSERT (s->size == size);
5024 s->contents = contents;
5028 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5030 struct elf32_arm_link_hash_table * globals;
5032 globals = elf32_arm_hash_table (info);
5033 BFD_ASSERT (globals != NULL);
5035 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5036 globals->arm_glue_size,
5037 ARM2THUMB_GLUE_SECTION_NAME);
5039 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5040 globals->thumb_glue_size,
5041 THUMB2ARM_GLUE_SECTION_NAME);
5043 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5044 globals->vfp11_erratum_glue_size,
5045 VFP11_ERRATUM_VENEER_SECTION_NAME);
5047 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5048 globals->bx_glue_size,
5049 ARM_BX_GLUE_SECTION_NAME);
5054 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5055 Returns the symbol identifying the stub. */
5057 static struct elf_link_hash_entry *
5058 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5059 struct elf_link_hash_entry * h)
5061 const char * name = h->root.root.string;
5064 struct elf_link_hash_entry * myh;
5065 struct bfd_link_hash_entry * bh;
5066 struct elf32_arm_link_hash_table * globals;
5070 globals = elf32_arm_hash_table (link_info);
5071 BFD_ASSERT (globals != NULL);
5072 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5074 s = bfd_get_section_by_name
5075 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5077 BFD_ASSERT (s != NULL);
5079 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5080 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5082 BFD_ASSERT (tmp_name);
5084 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5086 myh = elf_link_hash_lookup
5087 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5091 /* We've already seen this guy. */
5096 /* The only trick here is using hash_table->arm_glue_size as the value.
5097 Even though the section isn't allocated yet, this is where we will be
5098 putting it. The +1 on the value marks that the stub has not been
5099 output yet - not that it is a Thumb function. */
5101 val = globals->arm_glue_size + 1;
5102 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5103 tmp_name, BSF_GLOBAL, s, val,
5104 NULL, TRUE, FALSE, &bh);
5106 myh = (struct elf_link_hash_entry *) bh;
5107 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5108 myh->forced_local = 1;
5112 if (link_info->shared || globals->root.is_relocatable_executable
5113 || globals->pic_veneer)
5114 size = ARM2THUMB_PIC_GLUE_SIZE;
5115 else if (globals->use_blx)
5116 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5118 size = ARM2THUMB_STATIC_GLUE_SIZE;
5121 globals->arm_glue_size += size;
5126 /* Allocate space for ARMv4 BX veneers. */
5129 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5132 struct elf32_arm_link_hash_table *globals;
5134 struct elf_link_hash_entry *myh;
5135 struct bfd_link_hash_entry *bh;
5138 /* BX PC does not need a veneer. */
5142 globals = elf32_arm_hash_table (link_info);
5143 BFD_ASSERT (globals != NULL);
5144 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5146 /* Check if this veneer has already been allocated. */
5147 if (globals->bx_glue_offset[reg])
5150 s = bfd_get_section_by_name
5151 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5153 BFD_ASSERT (s != NULL);
5155 /* Add symbol for veneer. */
5157 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5159 BFD_ASSERT (tmp_name);
5161 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5163 myh = elf_link_hash_lookup
5164 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5166 BFD_ASSERT (myh == NULL);
5169 val = globals->bx_glue_size;
5170 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5171 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5172 NULL, TRUE, FALSE, &bh);
5174 myh = (struct elf_link_hash_entry *) bh;
5175 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5176 myh->forced_local = 1;
5178 s->size += ARM_BX_VENEER_SIZE;
5179 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5180 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
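/* bx_glue_offset[reg] holds the veneer's byte offset plus two tag bits:
   bit 1 (set above) records that the veneer has been allocated, and
   bit 0 (set later in elf32_arm_bx_glue) that its contents have been
   written; the offset itself is recovered with "& ~3".  */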
5184 /* Add an entry to the code/data map for section SEC. */
5187 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5189 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5190 unsigned int newidx;
5192 if (sec_data->map == NULL)
5194 sec_data->map = (elf32_arm_section_map *)
5195 bfd_malloc (sizeof (elf32_arm_section_map));
5196 sec_data->mapcount = 0;
5197 sec_data->mapsize = 1;
5200 newidx = sec_data->mapcount++;
5202 if (sec_data->mapcount > sec_data->mapsize)
5204 sec_data->mapsize *= 2;
5205 sec_data->map = (elf32_arm_section_map *)
5206 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5207 * sizeof (elf32_arm_section_map));
5212 sec_data->map[newidx].vma = vma;
5213 sec_data->map[newidx].type = type;
5218 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5219 veneers are handled for now. */
5222 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5223 elf32_vfp11_erratum_list *branch,
5225 asection *branch_sec,
5226 unsigned int offset)
5229 struct elf32_arm_link_hash_table *hash_table;
5231 struct elf_link_hash_entry *myh;
5232 struct bfd_link_hash_entry *bh;
5234 struct _arm_elf_section_data *sec_data;
5236 elf32_vfp11_erratum_list *newerr;
5238 hash_table = elf32_arm_hash_table (link_info);
5239 BFD_ASSERT (hash_table != NULL);
5240 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5242 s = bfd_get_section_by_name
5243 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5245 sec_data = elf32_arm_section_data (s);
5247 BFD_ASSERT (s != NULL);
5249 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5250 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5252 BFD_ASSERT (tmp_name);
5254 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5255 hash_table->num_vfp11_fixes);
5257 myh = elf_link_hash_lookup
5258 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5260 BFD_ASSERT (myh == NULL);
5263 val = hash_table->vfp11_erratum_glue_size;
5264 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5265 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5266 NULL, TRUE, FALSE, &bh);
5268 myh = (struct elf_link_hash_entry *) bh;
5269 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5270 myh->forced_local = 1;
5272 /* Link veneer back to calling location. */
5273 errcount = ++(sec_data->erratumcount);
5274 newerr = (elf32_vfp11_erratum_list *)
5275 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5277 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5279 newerr->u.v.branch = branch;
5280 newerr->u.v.id = hash_table->num_vfp11_fixes;
5281 branch->u.b.veneer = newerr;
5283 newerr->next = sec_data->erratumlist;
5284 sec_data->erratumlist = newerr;
5286 /* A symbol for the return from the veneer. */
5287 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5288 hash_table->num_vfp11_fixes);
5290 myh = elf_link_hash_lookup
5291 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5298 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5299 branch_sec, val, NULL, TRUE, FALSE, &bh);
5301 myh = (struct elf_link_hash_entry *) bh;
5302 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5303 myh->forced_local = 1;
5307 /* Generate a mapping symbol for the veneer section, and explicitly add an
5308 entry for that symbol to the code/data map for the section. */
5309 if (hash_table->vfp11_erratum_glue_size == 0)
5312 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5313 ever requires this erratum fix. */
5314 _bfd_generic_link_add_one_symbol (link_info,
5315 hash_table->bfd_of_glue_owner, "$a",
5316 BSF_LOCAL, s, 0, NULL,
5319 myh = (struct elf_link_hash_entry *) bh;
5320 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5321 myh->forced_local = 1;
5323 /* The elf32_arm_init_maps function only cares about symbols from input
5324 BFDs. We must make a note of this generated mapping symbol
5325 ourselves so that code byteswapping works properly in
5326 elf32_arm_write_section. */
5327 elf32_arm_section_map_add (s, 'a', 0);
5330 s->size += VFP11_ERRATUM_VENEER_SIZE;
5331 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5332 hash_table->num_vfp11_fixes++;
5334 /* The offset of the veneer. */
5338 #define ARM_GLUE_SECTION_FLAGS \
5339 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5340 | SEC_READONLY | SEC_LINKER_CREATED)
5342 /* Create a fake section for use by the ARM backend of the linker. */
5345 arm_make_glue_section (bfd * abfd, const char * name)
5349 sec = bfd_get_section_by_name (abfd, name);
5354 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5357 || !bfd_set_section_alignment (abfd, sec, 2))
5360 /* Set the gc mark to prevent the section from being removed by garbage
5361 collection, despite the fact that no relocs refer to this section. */
5367 /* Add the glue sections to ABFD. This function is called from the
5368 linker scripts in ld/emultempl/{armelf}.em. */
5371 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5372 struct bfd_link_info *info)
5374 /* If we are only performing a partial
5375 link do not bother adding the glue. */
5376 if (info->relocatable)
5379 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5380 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5381 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5382 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5385 /* Select a BFD to be used to hold the sections used by the glue code.
5386 This function is called from the linker scripts in ld/emultempl/{armelf}.em. */
5390 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5392 struct elf32_arm_link_hash_table *globals;
5394 /* If we are only performing a partial link
5395 do not bother getting a bfd to hold the glue. */
5396 if (info->relocatable)
5399 /* Make sure we don't attach the glue sections to a dynamic object. */
5400 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5402 globals = elf32_arm_hash_table (info);
5403 BFD_ASSERT (globals != NULL);
5405 if (globals->bfd_of_glue_owner != NULL)
5408 /* Save the bfd for later use. */
5409 globals->bfd_of_glue_owner = abfd;
5415 check_use_blx (struct elf32_arm_link_hash_table *globals)
5417 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5419 globals->use_blx = 1;
5423 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5424 struct bfd_link_info *link_info)
5426 Elf_Internal_Shdr *symtab_hdr;
5427 Elf_Internal_Rela *internal_relocs = NULL;
5428 Elf_Internal_Rela *irel, *irelend;
5429 bfd_byte *contents = NULL;
5432 struct elf32_arm_link_hash_table *globals;
5434 /* If we are only performing a partial link do not bother
5435 to construct any glue. */
5436 if (link_info->relocatable)
5439 /* Here we have a bfd that is to be included on the link. We have a
5440 hook to do reloc rummaging, before section sizes are nailed down. */
5441 globals = elf32_arm_hash_table (link_info);
5442 BFD_ASSERT (globals != NULL);
5444 check_use_blx (globals);
5446 if (globals->byteswap_code && !bfd_big_endian (abfd))
5448 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5453 /* PR 5398: If we have not decided to include any loadable sections in
5454 the output then we will not have a glue owner bfd. This is OK, it
5455 just means that there is nothing else for us to do here. */
5456 if (globals->bfd_of_glue_owner == NULL)
5459 /* Rummage around all the relocs and map the glue vectors. */
5460 sec = abfd->sections;
5465 for (; sec != NULL; sec = sec->next)
5467 if (sec->reloc_count == 0)
5470 if ((sec->flags & SEC_EXCLUDE) != 0)
5473 symtab_hdr = & elf_symtab_hdr (abfd);
5475 /* Load the relocs. */
5477 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5479 if (internal_relocs == NULL)
5482 irelend = internal_relocs + sec->reloc_count;
5483 for (irel = internal_relocs; irel < irelend; irel++)
5486 unsigned long r_index;
5488 struct elf_link_hash_entry *h;
5490 r_type = ELF32_R_TYPE (irel->r_info);
5491 r_index = ELF32_R_SYM (irel->r_info);
5493 /* These are the only relocation types we care about. */
5494 if ( r_type != R_ARM_PC24
5495 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5498 /* Get the section contents if we haven't done so already. */
5499 if (contents == NULL)
5501 /* Get cached copy if it exists. */
5502 if (elf_section_data (sec)->this_hdr.contents != NULL)
5503 contents = elf_section_data (sec)->this_hdr.contents;
5506 /* Go get them off disk. */
5507 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5512 if (r_type == R_ARM_V4BX)
5516 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5517 record_arm_bx_glue (link_info, reg);
5521 /* If the relocation is not against a symbol it cannot concern us. */
5524 /* We don't care about local symbols. */
5525 if (r_index < symtab_hdr->sh_info)
5528 /* This is an external symbol. */
5529 r_index -= symtab_hdr->sh_info;
5530 h = (struct elf_link_hash_entry *)
5531 elf_sym_hashes (abfd)[r_index];
5533 /* If the relocation is against a static symbol it must be within
5534 the current section and so cannot be a cross ARM/Thumb relocation. */
5538 /* If the call will go through a PLT entry then we do not need glue. */
5540 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5546 /* This one is a call from arm code. We need to look up
5547 the target of the call. If it is a thumb target, we insert glue. */
5549 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5550 record_arm_to_thumb_glue (link_info, h);
5558 if (contents != NULL
5559 && elf_section_data (sec)->this_hdr.contents != contents)
5563 if (internal_relocs != NULL
5564 && elf_section_data (sec)->relocs != internal_relocs)
5565 free (internal_relocs);
5566 internal_relocs = NULL;
5572 if (contents != NULL
5573 && elf_section_data (sec)->this_hdr.contents != contents)
5575 if (internal_relocs != NULL
5576 && elf_section_data (sec)->relocs != internal_relocs)
5577 free (internal_relocs);
5584 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5587 bfd_elf32_arm_init_maps (bfd *abfd)
5589 Elf_Internal_Sym *isymbuf;
5590 Elf_Internal_Shdr *hdr;
5591 unsigned int i, localsyms;
5593 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5594 if (! is_arm_elf (abfd))
5597 if ((abfd->flags & DYNAMIC) != 0)
5600 hdr = & elf_symtab_hdr (abfd);
5601 localsyms = hdr->sh_info;
5603 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5604 should contain the number of local symbols, which should come before any
5605 global symbols. Mapping symbols are always local. */
5606 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5609 /* No internal symbols read? Skip this BFD. */
5610 if (isymbuf == NULL)
5613 for (i = 0; i < localsyms; i++)
5615 Elf_Internal_Sym *isym = &isymbuf[i];
5616 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5620 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5622 name = bfd_elf_string_from_elf_section (abfd,
5623 hdr->sh_link, isym->st_name);
5625 if (bfd_is_arm_special_symbol_name (name,
5626 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5627 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5633 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5634 say what they wanted. */
5637 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5639 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5640 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5642 if (globals == NULL)
5645 if (globals->fix_cortex_a8 == -1)
5647 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5648 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5649 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5650 || out_attr[Tag_CPU_arch_profile].i == 0))
5651 globals->fix_cortex_a8 = 1;
5653 globals->fix_cortex_a8 = 0;
5659 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5661 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5662 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5664 if (globals == NULL)
5666 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5667 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5669 switch (globals->vfp11_fix)
5671 case BFD_ARM_VFP11_FIX_DEFAULT:
5672 case BFD_ARM_VFP11_FIX_NONE:
5673 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5677 /* Give a warning, but do as the user requests anyway. */
5678 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5679 "workaround is not necessary for target architecture"), obfd);
5682 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5683 /* For earlier architectures, we might need the workaround, but do not
5684 enable it by default. If the user is running with broken hardware, they
5685 must enable the erratum fix explicitly. */
5686 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5690 enum bfd_arm_vfp11_pipe
5698 /* Return a VFP register number. This is encoded as RX:X for single-precision
5699 registers, or X:RX for double-precision registers, where RX is the group of
5700 four bits in the instruction encoding and X is the single extension bit.
5701 RX and X fields are specified using their lowest (starting) bit. The return value is:
5704 0...31: single-precision registers s0...s31
5705 32...63: double-precision registers d0...d31.
5707 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5708 encounter VFP3 instructions, so we allow the full range for DP registers. */
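/* For example, with rx = 0 and x = 5 (the Fm operand): if bits 0..3 of
   the instruction are 0b0101 and bit 5 is clear, a double-precision
   operand decodes to 5 + 32 = 37, i.e. d5, whereas a single-precision
   operand decodes to (5 << 1) | 0 = 10, i.e. s10.  */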
5711 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5715 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5717 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5720 /* Set bits in *WMASK according to a register number REG as encoded by
5721 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5724 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5729 *wmask |= 3 << ((reg - 32) * 2);
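/* e.g. d5 is register number 37, so this sets bits 10 and 11 of the
   mask, the positions that s10 and s11 (the overlapping
   single-precision halves) occupy.  */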
5732 /* Return TRUE if WMASK overwrites anything in REGS. */
5735 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5739 for (i = 0; i < numregs; i++)
5741 unsigned int reg = regs[i];
5743 if (reg < 32 && (wmask & (1 << reg)) != 0)
5751 if ((wmask & (3 << (reg * 2))) != 0)
5758 /* In this function, we're interested in two things: finding input registers
5759 for VFP data-processing instructions, and finding the set of registers which
5760 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5761 hold the written set, so FLDM etc. are easy to deal with (we're only
5762 interested in 32 SP registers or 16 dp registers, due to the VFP version
5763 implemented by the chip in question). DP registers are marked by setting
5764 both SP registers in the write mask. */
5766 static enum bfd_arm_vfp11_pipe
5767 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5770 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
5771 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5773 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5776 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5777 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5779 pqrs = ((insn & 0x00800000) >> 20)
5780 | ((insn & 0x00300000) >> 19)
5781 | ((insn & 0x00000040) >> 6);
5785 case 0: /* fmac[sd]. */
5786 case 1: /* fnmac[sd]. */
5787 case 2: /* fmsc[sd]. */
5788 case 3: /* fnmsc[sd]. */
5790 bfd_arm_vfp11_write_mask (destmask, fd);
5792 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5797 case 4: /* fmul[sd]. */
5798 case 5: /* fnmul[sd]. */
5799 case 6: /* fadd[sd]. */
5800 case 7: /* fsub[sd]. */
5804 case 8: /* fdiv[sd]. */
5807 bfd_arm_vfp11_write_mask (destmask, fd);
5808 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5813 case 15: /* extended opcode. */
5815 unsigned int extn = ((insn >> 15) & 0x1e)
5816 | ((insn >> 7) & 1);
5820 case 0: /* fcpy[sd]. */
5821 case 1: /* fabs[sd]. */
5822 case 2: /* fneg[sd]. */
5823 case 8: /* fcmp[sd]. */
5824 case 9: /* fcmpe[sd]. */
5825 case 10: /* fcmpz[sd]. */
5826 case 11: /* fcmpez[sd]. */
5827 case 16: /* fuito[sd]. */
5828 case 17: /* fsito[sd]. */
5829 case 24: /* ftoui[sd]. */
5830 case 25: /* ftouiz[sd]. */
5831 case 26: /* ftosi[sd]. */
5832 case 27: /* ftosiz[sd]. */
5833 /* These instructions will not bounce due to underflow. */
5838 case 3: /* fsqrt[sd]. */
5839 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5840 registers to cause the erratum in previous instructions. */
5841 bfd_arm_vfp11_write_mask (destmask, fd);
5845 case 15: /* fcvt{ds,sd}. */
5849 bfd_arm_vfp11_write_mask (destmask, fd);
5851 /* Only FCVTSD can underflow. */
5852 if ((insn & 0x100) != 0)
5871 /* Two-register transfer. */
5872 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5874 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5876 if ((insn & 0x100000) == 0)
5879 bfd_arm_vfp11_write_mask (destmask, fm);
5882 bfd_arm_vfp11_write_mask (destmask, fm);
5883 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5889 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5891 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5892 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5896 case 0: /* Two-reg transfer. We should catch these above. */
5899 case 2: /* fldm[sdx]. */
5903 unsigned int i, offset = insn & 0xff;
5908 for (i = fd; i < fd + offset; i++)
5909 bfd_arm_vfp11_write_mask (destmask, i);
5913 case 4: /* fld[sd]. */
5915 bfd_arm_vfp11_write_mask (destmask, fd);
5924 /* Single-register transfer. Note L==0. */
5925 else if ((insn & 0x0f100e10) == 0x0e000a10)
5927 unsigned int opcode = (insn >> 21) & 7;
5928 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5932 case 0: /* fmsr/fmdlr. */
5933 case 1: /* fmdhr. */
5934 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5935 destination register. I don't know if this is exactly right,
5936 but it is the conservative choice. */
5937 bfd_arm_vfp11_write_mask (destmask, fn);
5951 static int elf32_arm_compare_mapping (const void * a, const void * b);
5954 /* Look for potentially-troublesome code sequences which might trigger the
5955 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5956 (available from ARM) for details of the erratum. A short version is
5957 described in ld.texinfo. */
5960 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5963 bfd_byte *contents = NULL;
5965 int regs[3], numregs = 0;
5966 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5967 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5969 if (globals == NULL)
5972 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5973 The states transition as follows:
5975 0 -> 1 (vector) or 0 -> 2 (scalar)
5976 A VFP FMAC-pipeline instruction has been seen. Fill
5977 regs[0]..regs[numregs-1] with its input operands. Remember this
5978 instruction in 'first_fmac'.
5981 1 -> 2: Any instruction, except for a VFP instruction which overwrites regs[*].
5986 2 -> 3 (or 1 -> 3): A VFP instruction has been seen which overwrites any of regs[*].
5987 We must make a veneer! Reset state to 0 before examining the next instruction.
5991 2 -> 0: If we fail to match anything in state 2, reset to state 0 and reset
5992 the instruction pointer to the instruction after 'first_fmac'.
5994 If the VFP11 vector mode is in use, there must be at least two unrelated
5995 instructions between anti-dependent VFP11 instructions to properly avoid
5996 triggering the erratum, hence the use of the extra state 1. */
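/* For instance, with the scalar fix:

       fmacs  s0, s1, s2    FMAC seen, state 0 -> 2, regs[] holds its operands s0, s1, s2
       flds   s1, [r0]      s1 is overwritten, state 2 -> 3, veneer recorded

   With the vector fix, the extra state 1 also catches the case where
   one unrelated instruction sits between the two.  */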
5998 /* If we are only performing a partial link do not bother
5999 to construct any glue. */
6000 if (link_info->relocatable)
6003 /* Skip if this bfd does not correspond to an ELF image. */
6004 if (! is_arm_elf (abfd))
6007 /* We should have chosen a fix type by the time we get here. */
6008 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6010 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6013 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6014 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6017 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6019 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6020 struct _arm_elf_section_data *sec_data;
6022 /* If we don't have executable progbits, we're not interested in this
6023 section. Also skip if section is to be excluded. */
6024 if (elf_section_type (sec) != SHT_PROGBITS
6025 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6026 || (sec->flags & SEC_EXCLUDE) != 0
6027 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
6028 || sec->output_section == bfd_abs_section_ptr
6029 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6032 sec_data = elf32_arm_section_data (sec);
6034 if (sec_data->mapcount == 0)
6037 if (elf_section_data (sec)->this_hdr.contents != NULL)
6038 contents = elf_section_data (sec)->this_hdr.contents;
6039 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6042 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6043 elf32_arm_compare_mapping);
6045 for (span = 0; span < sec_data->mapcount; span++)
6047 unsigned int span_start = sec_data->map[span].vma;
6048 unsigned int span_end = (span == sec_data->mapcount - 1)
6049 ? sec->size : sec_data->map[span + 1].vma;
6050 char span_type = sec_data->map[span].type;
6052 /* FIXME: Only ARM mode is supported at present. We may need to
6053 support Thumb-2 mode also at some point. */
6054 if (span_type != 'a')
6057 for (i = span_start; i < span_end;)
6059 unsigned int next_i = i + 4;
6060 unsigned int insn = bfd_big_endian (abfd)
6061 ? (contents[i] << 24)
6062 | (contents[i + 1] << 16)
6063 | (contents[i + 2] << 8)
6065 : (contents[i + 3] << 24)
6066 | (contents[i + 2] << 16)
6067 | (contents[i + 1] << 8)
6069 unsigned int writemask = 0;
6070 enum bfd_arm_vfp11_pipe vpipe;
6075 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6077 /* I'm assuming the VFP11 erratum can trigger with denorm
6078 operands on either the FMAC or the DS pipeline. This might
6079 lead to slightly overenthusiastic veneer insertion. */
6080 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6082 state = use_vector ? 1 : 2;
6084 veneer_of_insn = insn;
6090 int other_regs[3], other_numregs;
6091 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6094 if (vpipe != VFP11_BAD
6095 && bfd_arm_vfp11_antidependency (writemask, regs,
6105 int other_regs[3], other_numregs;
6106 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6109 if (vpipe != VFP11_BAD
6110 && bfd_arm_vfp11_antidependency (writemask, regs,
6116 next_i = first_fmac + 4;
6122 abort (); /* Should be unreachable. */
6127 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
6128 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6131 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
6133 newerr->u.b.vfp_insn = veneer_of_insn;
6138 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6145 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6150 newerr->next = sec_data->erratumlist;
6151 sec_data->erratumlist = newerr;
6160 if (contents != NULL
6161 && elf_section_data (sec)->this_hdr.contents != contents)
6169 if (contents != NULL
6170 && elf_section_data (sec)->this_hdr.contents != contents)
6176 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6177 after sections have been laid out, using specially-named symbols. */
6180 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6181 struct bfd_link_info *link_info)
6184 struct elf32_arm_link_hash_table *globals;
6187 if (link_info->relocatable)
6190 /* Skip if this bfd does not correspond to an ELF image. */
6191 if (! is_arm_elf (abfd))
6194 globals = elf32_arm_hash_table (link_info);
6195 if (globals == NULL)
6198 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6199 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6201 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6203 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6204 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6206 for (; errnode != NULL; errnode = errnode->next)
6208 struct elf_link_hash_entry *myh;
6211 switch (errnode->type)
6213 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6214 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6215 /* Find veneer symbol. */
6216 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6217 errnode->u.b.veneer->u.v.id);
6219 myh = elf_link_hash_lookup
6220 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6223 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6224 "`%s'"), abfd, tmp_name);
6226 vma = myh->root.u.def.section->output_section->vma
6227 + myh->root.u.def.section->output_offset
6228 + myh->root.u.def.value;
6230 errnode->u.b.veneer->vma = vma;
6233 case VFP11_ERRATUM_ARM_VENEER:
6234 case VFP11_ERRATUM_THUMB_VENEER:
6235 /* Find return location. */
6236 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6239 myh = elf_link_hash_lookup
6240 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6243 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6244 "`%s'"), abfd, tmp_name);
6246 vma = myh->root.u.def.section->output_section->vma
6247 + myh->root.u.def.section->output_offset
6248 + myh->root.u.def.value;
6250 errnode->u.v.branch->vma = vma;
6263 /* Set target relocation values needed during linking. */
6266 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6267 struct bfd_link_info *link_info,
6269 char * target2_type,
6272 bfd_arm_vfp11_fix vfp11_fix,
6273 int no_enum_warn, int no_wchar_warn,
6274 int pic_veneer, int fix_cortex_a8)
6276 struct elf32_arm_link_hash_table *globals;
6278 globals = elf32_arm_hash_table (link_info);
6279 if (globals == NULL)
6282 globals->target1_is_rel = target1_is_rel;
6283 if (strcmp (target2_type, "rel") == 0)
6284 globals->target2_reloc = R_ARM_REL32;
6285 else if (strcmp (target2_type, "abs") == 0)
6286 globals->target2_reloc = R_ARM_ABS32;
6287 else if (strcmp (target2_type, "got-rel") == 0)
6288 globals->target2_reloc = R_ARM_GOT_PREL;
6291 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6294 globals->fix_v4bx = fix_v4bx;
6295 globals->use_blx |= use_blx;
6296 globals->vfp11_fix = vfp11_fix;
6297 globals->pic_veneer = pic_veneer;
6298 globals->fix_cortex_a8 = fix_cortex_a8;
6300 BFD_ASSERT (is_arm_elf (output_bfd));
6301 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6302 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6305 /* Replace the target offset of a Thumb bl or b.w instruction. */
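/* The signed Thumb-2 BL/B.W offset is split across the two halfwords as
   S:imm10 in the first and J1:J2:imm11 in the second, where
   J1 = NOT(I1) EOR S, J2 = NOT(I2) EOR S, and I1/I2 are bits 23 and 22
   of the byte offset; the code below writes those fields from OFFSET
   and leaves the remaining opcode bits untouched.  */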
6308 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6314 BFD_ASSERT ((offset & 1) == 0);
6316 upper = bfd_get_16 (abfd, insn);
6317 lower = bfd_get_16 (abfd, insn + 2);
6318 reloc_sign = (offset < 0) ? 1 : 0;
6319 upper = (upper & ~(bfd_vma) 0x7ff)
6320 | ((offset >> 12) & 0x3ff)
6321 | (reloc_sign << 10);
6322 lower = (lower & ~(bfd_vma) 0x2fff)
6323 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6324 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6325 | ((offset >> 1) & 0x7ff);
6326 bfd_put_16 (abfd, upper, insn);
6327 bfd_put_16 (abfd, lower, insn + 2);
6330 /* Thumb code calling an ARM function. */
6333 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6337 asection * input_section,
6338 bfd_byte * hit_data,
6341 bfd_signed_vma addend,
6343 char **error_message)
6347 long int ret_offset;
6348 struct elf_link_hash_entry * myh;
6349 struct elf32_arm_link_hash_table * globals;
6351 myh = find_thumb_glue (info, name, error_message);
6355 globals = elf32_arm_hash_table (info);
6356 BFD_ASSERT (globals != NULL);
6357 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6359 my_offset = myh->root.u.def.value;
6361 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6362 THUMB2ARM_GLUE_SECTION_NAME);
6364 BFD_ASSERT (s != NULL);
6365 BFD_ASSERT (s->contents != NULL);
6366 BFD_ASSERT (s->output_section != NULL);
6368 if ((my_offset & 0x01) == 0x01)
6371 && sym_sec->owner != NULL
6372 && !INTERWORK_FLAG (sym_sec->owner))
6374 (*_bfd_error_handler)
6375 (_("%B(%s): warning: interworking not enabled.\n"
6376 " first occurrence: %B: thumb call to arm"),
6377 sym_sec->owner, input_bfd, name);
6383 myh->root.u.def.value = my_offset;
6385 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6386 s->contents + my_offset);
6388 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6389 s->contents + my_offset + 2);
6392 /* Address of destination of the stub. */
6393 ((bfd_signed_vma) val)
6395 /* Offset from the start of the current section
6396 to the start of the stubs. */
6398 /* Offset of the start of this stub from the start of the stubs. */
6400 /* Address of the start of the current section. */
6401 + s->output_section->vma)
6402 /* The branch instruction is 4 bytes into the stub. */
6404 /* ARM branches work from the pc of the instruction + 8. */
6407 put_arm_insn (globals, output_bfd,
6408 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6409 s->contents + my_offset + 4);
6412 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6414 /* Now go back and fix up the original BL insn to point to here. */
6416 /* Address of where the stub is located. */
6417 (s->output_section->vma + s->output_offset + my_offset)
6418 /* Address of where the BL is located. */
6419 - (input_section->output_section->vma + input_section->output_offset
6421 /* Addend in the relocation. */
6423 /* Biasing for PC-relative addressing. */
6426 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6431 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6433 static struct elf_link_hash_entry *
6434 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6441 char ** error_message)
6444 long int ret_offset;
6445 struct elf_link_hash_entry * myh;
6446 struct elf32_arm_link_hash_table * globals;
6448 myh = find_arm_glue (info, name, error_message);
6452 globals = elf32_arm_hash_table (info);
6453 BFD_ASSERT (globals != NULL);
6454 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6456 my_offset = myh->root.u.def.value;
6458 if ((my_offset & 0x01) == 0x01)
6461 && sym_sec->owner != NULL
6462 && !INTERWORK_FLAG (sym_sec->owner))
6464 (*_bfd_error_handler)
6465 (_("%B(%s): warning: interworking not enabled.\n"
6466 " first occurrence: %B: arm call to thumb"),
6467 sym_sec->owner, input_bfd, name);
6471 myh->root.u.def.value = my_offset;
6473 if (info->shared || globals->root.is_relocatable_executable
6474 || globals->pic_veneer)
6476 /* For relocatable objects we can't use absolute addresses,
6477 so construct the address from a relative offset. */
6478 /* TODO: If the offset is small it's probably worth
6479 constructing the address with adds. */
6480 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6481 s->contents + my_offset);
6482 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6483 s->contents + my_offset + 4);
6484 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6485 s->contents + my_offset + 8);
6486 /* Adjust the offset by 4 for the position of the add,
6487 and 8 for the pipeline offset. */
6488 ret_offset = (val - (s->output_offset
6489 + s->output_section->vma
6492 bfd_put_32 (output_bfd, ret_offset,
6493 s->contents + my_offset + 12);
6495 else if (globals->use_blx)
6497 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6498 s->contents + my_offset);
6500 /* It's a thumb address. Add the low order bit. */
6501 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6502 s->contents + my_offset + 4);
6506 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6507 s->contents + my_offset);
6509 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6510 s->contents + my_offset + 4);
6512 /* It's a thumb address. Add the low order bit. */
6513 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6514 s->contents + my_offset + 8);
6520 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6525 /* Arm code calling a Thumb function. */
6528 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6532 asection * input_section,
6533 bfd_byte * hit_data,
6536 bfd_signed_vma addend,
6538 char **error_message)
6540 unsigned long int tmp;
6543 long int ret_offset;
6544 struct elf_link_hash_entry * myh;
6545 struct elf32_arm_link_hash_table * globals;
6547 globals = elf32_arm_hash_table (info);
6548 BFD_ASSERT (globals != NULL);
6549 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6551 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6552 ARM2THUMB_GLUE_SECTION_NAME);
6553 BFD_ASSERT (s != NULL);
6554 BFD_ASSERT (s->contents != NULL);
6555 BFD_ASSERT (s->output_section != NULL);
6557 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6558 sym_sec, val, s, error_message);
6562 my_offset = myh->root.u.def.value;
6563 tmp = bfd_get_32 (input_bfd, hit_data);
6564 tmp = tmp & 0xFF000000;
6566 /* Somehow these are both 4 too far, so subtract 8. */
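/* That is, an ARM B/BL reads its pc as the address of the instruction
   plus 8 and stores a signed 24-bit word offset, so the distance to the
   stub is reduced by 8 here and shifted right by two before being
   merged into the low 24 bits of the opcode below.  */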
6567 ret_offset = (s->output_offset
6569 + s->output_section->vma
6570 - (input_section->output_offset
6571 + input_section->output_section->vma
6575 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6577 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6582 /* Populate Arm stub for an exported Thumb function. */
6585 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6587 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6589 struct elf_link_hash_entry * myh;
6590 struct elf32_arm_link_hash_entry *eh;
6591 struct elf32_arm_link_hash_table * globals;
6594 char *error_message;
6596 eh = elf32_arm_hash_entry (h);
6597 /* Allocate stubs for exported Thumb functions on v4t. */
6598 if (eh->export_glue == NULL)
6601 globals = elf32_arm_hash_table (info);
6602 BFD_ASSERT (globals != NULL);
6603 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6605 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6606 ARM2THUMB_GLUE_SECTION_NAME);
6607 BFD_ASSERT (s != NULL);
6608 BFD_ASSERT (s->contents != NULL);
6609 BFD_ASSERT (s->output_section != NULL);
6611 sec = eh->export_glue->root.u.def.section;
6613 BFD_ASSERT (sec->output_section != NULL);
6615 val = eh->export_glue->root.u.def.value + sec->output_offset
6616 + sec->output_section->vma;
6618 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6619 h->root.u.def.section->owner,
6620 globals->obfd, sec, val, s,
6626 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6629 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6634 struct elf32_arm_link_hash_table *globals;
6636 globals = elf32_arm_hash_table (info);
6637 BFD_ASSERT (globals != NULL);
6638 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6640 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6641 ARM_BX_GLUE_SECTION_NAME);
6642 BFD_ASSERT (s != NULL);
6643 BFD_ASSERT (s->contents != NULL);
6644 BFD_ASSERT (s->output_section != NULL);
6646 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6648 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6650 if ((globals->bx_glue_offset[reg] & 1) == 0)
6652 p = s->contents + glue_addr;
6653 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6654 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6655 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
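/* The three words just written form the sequence
     tst   rN, #1
     moveq pc, rN
     bx    rN
   with the register number patched into the Rn field (bits 16-19) of
   the tst and the Rm field (bits 0-3) of the other two instructions,
   which is what the "+ (reg << 16)" and "+ reg" adjustments do.  */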
6656 globals->bx_glue_offset[reg] |= 1;
6659 return glue_addr + s->output_section->vma + s->output_offset;
6662 /* Generate Arm stubs for exported Thumb symbols. */
6664 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6665 struct bfd_link_info *link_info)
6667 struct elf32_arm_link_hash_table * globals;
6669 if (link_info == NULL)
6670 /* Ignore this if we are not called by the ELF backend linker. */
6673 globals = elf32_arm_hash_table (link_info);
6674 if (globals == NULL)
6677 /* If blx is available then exported Thumb symbols are OK and there is nothing to do. */
6679 if (globals->use_blx)
6682 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6686 /* Some relocations map to different relocations depending on the
6687 target. Return the real relocation. */
6690 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6696 if (globals->target1_is_rel)
6702 return globals->target2_reloc;
6709 /* Return the base VMA address which should be subtracted from real addresses
6710 when resolving @dtpoff relocation.
6711 This is PT_TLS segment p_vaddr. */
6714 dtpoff_base (struct bfd_link_info *info)
6716 /* If tls_sec is NULL, we should have signalled an error already. */
6717 if (elf_hash_table (info)->tls_sec == NULL)
6719 return elf_hash_table (info)->tls_sec->vma;
6722 /* Return the relocation value for @tpoff relocation
6723 if STT_TLS virtual address is ADDRESS. */
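/* ARM uses the "variant 1" TLS layout: the thread pointer addresses the
   thread control block and the first module's TLS data follows it, so
   the offset of ADDRESS from the thread pointer is its offset into the
   TLS segment plus TCB_SIZE rounded up to the segment's alignment,
   which is what is computed below.  */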
6726 tpoff (struct bfd_link_info *info, bfd_vma address)
6728 struct elf_link_hash_table *htab = elf_hash_table (info);
6731 /* If tls_sec is NULL, we should have signalled an error already. */
6732 if (htab->tls_sec == NULL)
6734 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6735 return address - htab->tls_sec->vma + base;
6738 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6739 VALUE is the relocation value. */
6741 static bfd_reloc_status_type
6742 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6745 return bfd_reloc_overflow;
6747 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6748 bfd_put_32 (abfd, value, data);
6749 return bfd_reloc_ok;
6752 /* For a given value of n, calculate the value of G_n as required to
6753 deal with group relocations. We return it in the form of an
6754 encoded constant-and-rotation, together with the final residual. If n is
6755 specified as less than zero, then final_residual is filled with the
6756 input value and no further action is performed. */
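/* For example, splitting the value 0x1234:

     calculate_group_reloc_mask (0x1234, 0, &residual)

   selects the top byte-sized group 0x1200 and returns it encoded as
   0xd48 (constant 0x48, rotation field 13, i.e. 0x48 rotated right by
   26 bits), leaving residual == 0x34; a second call with n == 1 then
   returns 0x34 and leaves no residual.  */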
6759 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6763 bfd_vma encoded_g_n = 0;
6764 bfd_vma residual = value; /* Also known as Y_n. */
6766 for (current_n = 0; current_n <= n; current_n++)
6770 /* Calculate which part of the value to mask. */
6777 /* Determine the most significant bit in the residual and
6778 align the resulting value to a 2-bit boundary. */
6779 for (msb = 30; msb >= 0; msb -= 2)
6780 if (residual & (3 << msb))
6783 /* The desired shift is now (msb - 6), or zero, whichever is the greater. */
6790 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6791 g_n = residual & (0xff << shift);
6792 encoded_g_n = (g_n >> shift)
6793 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6795 /* Calculate the residual for the next time around. */
6799 *final_residual = residual;
6804 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6805 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
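/* In the ARM data-processing encoding the opcode occupies bits 21-24;
   ADD is 0b0100 and SUB is 0b0010, so under the mask below an ADD shows
   up as 1 << 23 and a SUB as 1 << 22.  */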
6808 identify_add_or_sub (bfd_vma insn)
6810 int opcode = insn & 0x1e00000;
6812 if (opcode == 1 << 23) /* ADD */
6815 if (opcode == 1 << 22) /* SUB */
6821 /* Perform a relocation as part of a final link. */
6823 static bfd_reloc_status_type
6824 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6827 asection * input_section,
6828 bfd_byte * contents,
6829 Elf_Internal_Rela * rel,
6831 struct bfd_link_info * info,
6833 const char * sym_name,
6835 struct elf_link_hash_entry * h,
6836 bfd_boolean * unresolved_reloc_p,
6837 char ** error_message)
6839 unsigned long r_type = howto->type;
6840 unsigned long r_symndx;
6841 bfd_byte * hit_data = contents + rel->r_offset;
6842 bfd * dynobj = NULL;
6843 Elf_Internal_Shdr * symtab_hdr;
6844 struct elf_link_hash_entry ** sym_hashes;
6845 bfd_vma * local_got_offsets;
6846 asection * sgot = NULL;
6847 asection * splt = NULL;
6848 asection * sreloc = NULL;
6850 bfd_signed_vma signed_addend;
6851 struct elf32_arm_link_hash_table * globals;
6853 globals = elf32_arm_hash_table (info);
6854 if (globals == NULL)
6855 return bfd_reloc_notsupported;
6857 BFD_ASSERT (is_arm_elf (input_bfd));
6859 /* Some relocation types map to different relocations depending on the
6860 target. We pick the right one here. */
6861 r_type = arm_real_reloc_type (globals, r_type);
6862 if (r_type != howto->type)
6863 howto = elf32_arm_howto_from_type (r_type);
6865 /* If the start address has been set, then set the EF_ARM_HASENTRY
6866 flag. Setting this more than once is redundant, but the cost is
6867 not too high, and it keeps the code simple.
6869 The test is done here, rather than somewhere else, because the
6870 start address is only set just before the final link commences.
6872 Note - if the user deliberately sets a start address of 0, the
6873 flag will not be set. */
6874 if (bfd_get_start_address (output_bfd) != 0)
6875 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6877 dynobj = elf_hash_table (info)->dynobj;
6880 sgot = bfd_get_section_by_name (dynobj, ".got");
6881 splt = bfd_get_section_by_name (dynobj, ".plt");
6883 symtab_hdr = & elf_symtab_hdr (input_bfd);
6884 sym_hashes = elf_sym_hashes (input_bfd);
6885 local_got_offsets = elf_local_got_offsets (input_bfd);
6886 r_symndx = ELF32_R_SYM (rel->r_info);
6888 if (globals->use_rel)
6890 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6892 if (addend & ((howto->src_mask + 1) >> 1))
6895 signed_addend &= ~ howto->src_mask;
6896 signed_addend |= addend;
6899 signed_addend = addend;
6902 addend = signed_addend = rel->r_addend;
6907 /* We don't need to find a value for this symbol. It's just a marker. */
6909 *unresolved_reloc_p = FALSE;
6910 return bfd_reloc_ok;
6913 if (!globals->vxworks_p)
6914 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6918 case R_ARM_ABS32_NOI:
6920 case R_ARM_REL32_NOI:
6926 /* Handle relocations which should use the PLT entry. ABS32/REL32
6927 will use the symbol's value, which may point to a PLT entry, but we
6928 don't need to handle that here. If we created a PLT entry, all
6929 branches in this object should go to it, except if the PLT is too
6930 far away, in which case a long branch stub should be inserted. */
6931 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6932 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6933 && r_type != R_ARM_CALL
6934 && r_type != R_ARM_JUMP24
6935 && r_type != R_ARM_PLT32)
6938 && h->plt.offset != (bfd_vma) -1)
6940 /* If we've created a .plt section, and assigned a PLT entry to
6941 this function, it should not be known to bind locally. If
6942 it were, we would have cleared the PLT entry. */
6943 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6945 value = (splt->output_section->vma
6946 + splt->output_offset
6948 *unresolved_reloc_p = FALSE;
6949 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6950 contents, rel->r_offset, value,
6954 /* When generating a shared object or relocatable executable, these
6955 relocations are copied into the output file to be resolved at run time. */
6957 if ((info->shared || globals->root.is_relocatable_executable)
6958 && (input_section->flags & SEC_ALLOC)
6959 && !(globals->vxworks_p
6960 && strcmp (input_section->output_section->name,
6962 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6963 || !SYMBOL_CALLS_LOCAL (info, h))
6964 && (!strstr (input_section->name, STUB_SUFFIX))
6966 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6967 || h->root.type != bfd_link_hash_undefweak)
6968 && r_type != R_ARM_PC24
6969 && r_type != R_ARM_CALL
6970 && r_type != R_ARM_JUMP24
6971 && r_type != R_ARM_PREL31
6972 && r_type != R_ARM_PLT32)
6974 Elf_Internal_Rela outrel;
6976 bfd_boolean skip, relocate;
6978 *unresolved_reloc_p = FALSE;
6982 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6983 ! globals->use_rel);
6986 return bfd_reloc_notsupported;
6992 outrel.r_addend = addend;
6994 _bfd_elf_section_offset (output_bfd, info, input_section,
6996 if (outrel.r_offset == (bfd_vma) -1)
6998 else if (outrel.r_offset == (bfd_vma) -2)
6999 skip = TRUE, relocate = TRUE;
7000 outrel.r_offset += (input_section->output_section->vma
7001 + input_section->output_offset);
7004 memset (&outrel, 0, sizeof outrel);
7009 || !h->def_regular))
7010 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
7015 /* This symbol is local, or marked to become local. */
7016 if (sym_flags == STT_ARM_TFUNC)
7018 if (globals->symbian_p)
7022 /* On Symbian OS, the data segment and text segment
7023 can be relocated independently. Therefore, we
7024 must indicate the segment to which this
7025 relocation is relative. The BPABI allows us to
7026 use any symbol in the right segment; we just use
7027 the section symbol as it is convenient. (We
7028 cannot use the symbol given by "h" directly as it
7029 will not appear in the dynamic symbol table.)
7031 Note that the dynamic linker ignores the section
7032 symbol value, so we don't subtract osec->vma
7033 from the emitted reloc addend. */
7035 osec = sym_sec->output_section;
7037 osec = input_section->output_section;
7038 symbol = elf_section_data (osec)->dynindx;
7041 struct elf_link_hash_table *htab = elf_hash_table (info);
7043 if ((osec->flags & SEC_READONLY) == 0
7044 && htab->data_index_section != NULL)
7045 osec = htab->data_index_section;
7047 osec = htab->text_index_section;
7048 symbol = elf_section_data (osec)->dynindx;
7050 BFD_ASSERT (symbol != 0);
7053 /* On SVR4-ish systems, the dynamic loader cannot
7054 relocate the text and data segments independently,
7055 so the symbol does not matter. */
7057 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
7058 if (globals->use_rel)
7061 outrel.r_addend += value;
7064 loc = sreloc->contents;
7065 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
7066 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7068 /* If this reloc is against an external symbol, we do not want to
7069 fiddle with the addend. Otherwise, we need to include the symbol
7070 value so that it becomes an addend for the dynamic reloc. */
7072 return bfd_reloc_ok;
7074 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7075 contents, rel->r_offset, value,
7078 else switch (r_type)
7081 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7083 case R_ARM_XPC25: /* Arm BLX instruction. */
7086 case R_ARM_PC24: /* Arm B/BL instruction. */
7089 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7091 if (r_type == R_ARM_XPC25)
7093 /* Check for Arm calling Arm function. */
7094 /* FIXME: Should we translate the instruction into a BL
7095 instruction instead ? */
7096 if (sym_flags != STT_ARM_TFUNC)
7097 (*_bfd_error_handler)
7098 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7100 h ? h->root.root.string : "(local)");
7102 else if (r_type == R_ARM_PC24)
7104 /* Check for Arm calling Thumb function. */
7105 if (sym_flags == STT_ARM_TFUNC)
7107 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7108 output_bfd, input_section,
7109 hit_data, sym_sec, rel->r_offset,
7110 signed_addend, value,
7112 return bfd_reloc_ok;
7114 return bfd_reloc_dangerous;
7118 /* Check if a stub has to be inserted because the
7119 destination is too far or we are changing mode. */
7120 if ( r_type == R_ARM_CALL
7121 || r_type == R_ARM_JUMP24
7122 || r_type == R_ARM_PLT32)
7124 enum elf32_arm_stub_type stub_type = arm_stub_none;
7125 struct elf32_arm_link_hash_entry *hash;
7127 hash = (struct elf32_arm_link_hash_entry *) h;
7128 stub_type = arm_type_of_stub (info, input_section, rel,
7131 input_bfd, sym_name);
7133 if (stub_type != arm_stub_none)
7135 /* The target is out of reach, so redirect the
7136 branch to the local stub for this function. */
7138 stub_entry = elf32_arm_get_stub_entry (input_section,
7142 if (stub_entry != NULL)
7143 value = (stub_entry->stub_offset
7144 + stub_entry->stub_sec->output_offset
7145 + stub_entry->stub_sec->output_section->vma);
7149 /* If the call goes through a PLT entry, make sure to
7150 check distance to the right destination address. */
7153 && h->plt.offset != (bfd_vma) -1)
7155 value = (splt->output_section->vma
7156 + splt->output_offset
7158 *unresolved_reloc_p = FALSE;
7159 /* The PLT entry is in ARM mode, regardless of the original symbol. */
7161 sym_flags = STT_FUNC;
7166 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7168 S is the address of the symbol in the relocation.
7169 P is address of the instruction being relocated.
7170 A is the addend (extracted from the instruction) in bytes.
7172 S is held in 'value'.
7173 P is the base address of the section containing the
7174 instruction plus the offset of the reloc into that section, ie:
7176 (input_section->output_section->vma +
7177 input_section->output_offset + rel->r_offset).
7179 A is the addend, converted into bytes, ie: rel->r_addend * 4.
7182 Note: None of these operations have knowledge of the pipeline
7183 size of the processor, thus it is up to the assembler to
7184 encode this information into the addend. */
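/* In other words the 24-bit field ends up holding (S + A - P) >> 2, so
   a plain B/BL can only reach targets within roughly +/-32MB of the
   branch; anything further away has to go through a stub or the PLT,
   as handled above.  */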
7185 value -= (input_section->output_section->vma
7186 + input_section->output_offset);
7187 value -= rel->r_offset;
7188 if (globals->use_rel)
7189 value += (signed_addend << howto->size);
7191 /* RELA addends do not have to be adjusted by howto->size. */
7192 value += signed_addend;
7194 signed_addend = value;
7195 signed_addend >>= howto->rightshift;
7197 /* A branch to an undefined weak symbol is turned into a jump to
7198 the next instruction unless a PLT entry will be created.
7199 Do the same for local undefined symbols.
7200 The jump to the next instruction is optimized as a NOP depending
7201 on the architecture. */
7202 if (h ? (h->root.type == bfd_link_hash_undefweak
7203 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7204 : bfd_is_und_section (sym_sec))
7206 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7208 if (arch_has_arm_nop (globals))
7209 value |= 0x0320f000;
7211 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7215 /* Perform a signed range check. */
7216 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7217 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7218 return bfd_reloc_overflow;
7220 addend = (value & 2);
7222 value = (signed_addend & howto->dst_mask)
7223 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7225 if (r_type == R_ARM_CALL)
7227 /* Set the H bit in the BLX instruction. */
7228 if (sym_flags == STT_ARM_TFUNC)
7233 value &= ~(bfd_vma)(1 << 24);
7236 /* Select the correct instruction (BL or BLX). */
7237 /* Only if we are not handling a BL to a stub. In this
7238 case, mode switching is performed by the stub. */
7239 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7243 value &= ~(bfd_vma)(1 << 28);
7253 if (sym_flags == STT_ARM_TFUNC)
7257 case R_ARM_ABS32_NOI:
7263 if (sym_flags == STT_ARM_TFUNC)
7265 value -= (input_section->output_section->vma
7266 + input_section->output_offset + rel->r_offset);
7269 case R_ARM_REL32_NOI:
7271 value -= (input_section->output_section->vma
7272 + input_section->output_offset + rel->r_offset);
7276 value -= (input_section->output_section->vma
7277 + input_section->output_offset + rel->r_offset);
7278 value += signed_addend;
7279 if (! h || h->root.type != bfd_link_hash_undefweak)
7281 /* Check for overflow. */
7282 if ((value ^ (value >> 1)) & (1 << 30))
7283 return bfd_reloc_overflow;
7285 value &= 0x7fffffff;
7286 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7287 if (sym_flags == STT_ARM_TFUNC)
7292 bfd_put_32 (input_bfd, value, hit_data);
7293 return bfd_reloc_ok;
7298 /* There is no way to tell whether the user intended to use a signed or
7299 unsigned addend. When checking for overflow we accept either,
7300 as specified by the AAELF. */
7301 if ((long) value > 0xff || (long) value < -0x80)
7302 return bfd_reloc_overflow;
7304 bfd_put_8 (input_bfd, value, hit_data);
7305 return bfd_reloc_ok;
7310 /* See comment for R_ARM_ABS8. */
7311 if ((long) value > 0xffff || (long) value < -0x8000)
7312 return bfd_reloc_overflow;
7314 bfd_put_16 (input_bfd, value, hit_data);
7315 return bfd_reloc_ok;
7317 case R_ARM_THM_ABS5:
7318 /* Support ldr and str instructions for the thumb. */
7319 if (globals->use_rel)
7321 /* Need to refetch addend. */
7322 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7323 /* ??? Need to determine shift amount from operand size. */
7324 addend >>= howto->rightshift;
7328 /* ??? Isn't value unsigned? */
7329 if ((long) value > 0x1f || (long) value < -0x10)
7330 return bfd_reloc_overflow;
7332 /* ??? Value needs to be properly shifted into place first. */
7333 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7334 bfd_put_16 (input_bfd, value, hit_data);
7335 return bfd_reloc_ok;
7337 case R_ARM_THM_ALU_PREL_11_0:
7338 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7341 bfd_signed_vma relocation;
7343 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7344 | bfd_get_16 (input_bfd, hit_data + 2);
7346 if (globals->use_rel)
7348 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7349 | ((insn & (1 << 26)) >> 15);
7350 if (insn & 0xf00000)
7351 signed_addend = -signed_addend;
7354 relocation = value + signed_addend;
7355 relocation -= (input_section->output_section->vma
7356 + input_section->output_offset
7359 value = abs (relocation);
7361 if (value >= 0x1000)
7362 return bfd_reloc_overflow;
7364 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7365 | ((value & 0x700) << 4)
7366 | ((value & 0x800) << 15);
7370 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7371 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7373 return bfd_reloc_ok;
7377 /* PR 10073: This reloc is not generated by the GNU toolchain,
7378 but it is supported for compatibility with third party libraries
7379 generated by other compilers, specifically the ARM/IAR. */
7382 bfd_signed_vma relocation;
7384 insn = bfd_get_16 (input_bfd, hit_data);
7386 if (globals->use_rel)
7387 addend = (insn & 0x00ff) << 2;
7389 relocation = value + addend;
7390 relocation -= (input_section->output_section->vma
7391 + input_section->output_offset
7394 value = abs (relocation);
7396 /* We do not check for overflow of this reloc. Although strictly
7397 speaking this is incorrect, it appears to be necessary in order
7398 to work with IAR generated relocs. Since GCC and GAS do not
7399 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7400 a problem for them. */
7403 insn = (insn & 0xff00) | (value >> 2);
7405 bfd_put_16 (input_bfd, insn, hit_data);
7407 return bfd_reloc_ok;
7410 case R_ARM_THM_PC12:
7411 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7414 bfd_signed_vma relocation;
7416 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7417 | bfd_get_16 (input_bfd, hit_data + 2);
7419 if (globals->use_rel)
7421 signed_addend = insn & 0xfff;
7422 if (!(insn & (1 << 23)))
7423 signed_addend = -signed_addend;
7426 relocation = value + signed_addend;
7427 relocation -= (input_section->output_section->vma
7428 + input_section->output_offset
7431 value = abs (relocation);
7433 if (value >= 0x1000)
7434 return bfd_reloc_overflow;
7436 insn = (insn & 0xff7ff000) | value;
7437 if (relocation >= 0)
7440 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7441 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7443 return bfd_reloc_ok;
7446 case R_ARM_THM_XPC22:
7447 case R_ARM_THM_CALL:
7448 case R_ARM_THM_JUMP24:
7449 /* Thumb BL (branch long instruction). */
7453 bfd_boolean overflow = FALSE;
7454 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7455 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7456 bfd_signed_vma reloc_signed_max;
7457 bfd_signed_vma reloc_signed_min;
7459 bfd_signed_vma signed_check;
7461 const int thumb2 = using_thumb2 (globals);
7463 /* A branch to an undefined weak symbol is turned into a jump to
7464 the next instruction unless a PLT entry will be created.
7465 The jump to the next instruction is optimized as a NOP.W for
7466 Thumb-2 enabled architectures. */
7467 if (h && h->root.type == bfd_link_hash_undefweak
7468 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7470 if (arch_has_thumb2_nop (globals))
7472 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7473 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7477 bfd_put_16 (input_bfd, 0xe000, hit_data);
7478 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7480 return bfd_reloc_ok;
7483 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7484 with Thumb-1) involving the J1 and J2 bits. */
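/* In the Thumb-2 encoding the 25-bit branch offset is assembled as
   S:I1:I2:imm10:imm11:0, where I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S);
   the or-with-bit-24/subtract-(1 << 24) step below then sign-extends
   that value using S as the sign bit.  In the original Thumb-1 BL
   encoding J1 and J2 are always set, which makes I1 and I2 copies of
   the sign bit and so reduces to the old 22-bit sign-extended offset.  */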
7485 if (globals->use_rel)
7487 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7488 bfd_vma upper = upper_insn & 0x3ff;
7489 bfd_vma lower = lower_insn & 0x7ff;
7490 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7491 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7492 bfd_vma i1 = j1 ^ s ? 0 : 1;
7493 bfd_vma i2 = j2 ^ s ? 0 : 1;
7495 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7497 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7499 signed_addend = addend;
7502 if (r_type == R_ARM_THM_XPC22)
7504 /* Check for Thumb to Thumb call. */
7505 /* FIXME: Should we translate the instruction into a BL
7506 instruction instead ? */
7507 if (sym_flags == STT_ARM_TFUNC)
7508 (*_bfd_error_handler)
7509 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7511 h ? h->root.root.string : "(local)");
7515 /* If it is not a call to Thumb, assume call to Arm.
7516 If it is a call relative to a section name, then it is not a
7517 function call at all, but rather a long jump. Calls through
7518 the PLT do not require stubs. */
7519 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7520 && (h == NULL || splt == NULL
7521 || h->plt.offset == (bfd_vma) -1))
7523 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7525 /* Convert BL to BLX. */
7526 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7528 else if (( r_type != R_ARM_THM_CALL)
7529 && (r_type != R_ARM_THM_JUMP24))
7531 if (elf32_thumb_to_arm_stub
7532 (info, sym_name, input_bfd, output_bfd, input_section,
7533 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7535 return bfd_reloc_ok;
7537 return bfd_reloc_dangerous;
7540 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7541 && r_type == R_ARM_THM_CALL)
7543 /* Make sure this is a BL. */
7544 lower_insn |= 0x1800;
7548 enum elf32_arm_stub_type stub_type = arm_stub_none;
7549 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7551 /* Check if a stub has to be inserted because the destination is too far or we are changing mode. */
7553 struct elf32_arm_stub_hash_entry *stub_entry;
7554 struct elf32_arm_link_hash_entry *hash;
7556 hash = (struct elf32_arm_link_hash_entry *) h;
7558 stub_type = arm_type_of_stub (info, input_section, rel,
7559 &sym_flags, hash, value, sym_sec,
7560 input_bfd, sym_name);
7562 if (stub_type != arm_stub_none)
7564 /* The target is out of reach or we are changing modes, so
7565 redirect the branch to the local stub for this
7567 stub_entry = elf32_arm_get_stub_entry (input_section,
7571 if (stub_entry != NULL)
7572 value = (stub_entry->stub_offset
7573 + stub_entry->stub_sec->output_offset
7574 + stub_entry->stub_sec->output_section->vma);
7576 /* If this call becomes a call to Arm, force BLX. */
7577 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7580 && !arm_stub_is_thumb (stub_entry->stub_type))
7581 || (sym_flags != STT_ARM_TFUNC))
7582 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7587 /* Handle calls via the PLT. */
7588 if (stub_type == arm_stub_none
7591 && h->plt.offset != (bfd_vma) -1)
7593 value = (splt->output_section->vma
7594 + splt->output_offset
7597 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7599 /* If the Thumb BLX instruction is available, convert
7600 the BL to a BLX instruction to call the ARM-mode PLT entry. */
7602 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7603 sym_flags = STT_FUNC;
7607 /* Target the Thumb stub before the ARM PLT entry. */
7608 value -= PLT_THUMB_STUB_SIZE;
7609 sym_flags = STT_ARM_TFUNC;
7611 *unresolved_reloc_p = FALSE;
7614 relocation = value + signed_addend;
7616 relocation -= (input_section->output_section->vma
7617 + input_section->output_offset
7620 check = relocation >> howto->rightshift;
7622 /* If this is a signed value, the rightshift just dropped
7623 leading 1 bits (assuming twos complement). */
7624 if ((bfd_signed_vma) relocation >= 0)
7625 signed_check = check;
7627 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7629 /* Calculate the permissible maximum and minimum values for
7630 this relocation according to whether we're relocating for Thumb-2 or not. */
7632 bitsize = howto->bitsize;
7635 reloc_signed_max = (1 << (bitsize - 1)) - 1;
7636 reloc_signed_min = ~reloc_signed_max;
7638 /* Assumes two's complement. */
7639 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7642 if ((lower_insn & 0x5000) == 0x4000)
7643 /* For a BLX instruction, make sure that the relocation is rounded up
7644 to a word boundary. This follows the semantics of the instruction
7645 which specifies that bit 1 of the target address will come from bit
7646 1 of the base address. */
7647 relocation = (relocation + 2) & ~ 3;
7649 /* Put RELOCATION back into the insn. Assumes two's complement.
7650 We use the Thumb-2 encoding, which is safe even if dealing with
7651 a Thumb-1 instruction by virtue of our overflow check above. */
7652 reloc_sign = (signed_check < 0) ? 1 : 0;
7653 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7654 | ((relocation >> 12) & 0x3ff)
7655 | (reloc_sign << 10);
7656 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7657 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7658 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7659 | ((relocation >> 1) & 0x7ff);
7661 /* Put the relocated value back in the object file: */
7662 bfd_put_16 (input_bfd, upper_insn, hit_data);
7663 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7665 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7669 case R_ARM_THM_JUMP19:
7670 /* Thumb32 conditional branch instruction. */
7673 bfd_boolean overflow = FALSE;
7674 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7675 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7676 bfd_signed_vma reloc_signed_max = 0xffffe;
7677 bfd_signed_vma reloc_signed_min = -0x100000;
7678 bfd_signed_vma signed_check;
7680 /* Need to refetch the addend, reconstruct the top three bits,
7681 and squish the 6-bit and 11-bit immediate pieces together. */
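/* A Thumb-2 conditional branch (B<cond>.W) stores its offset as
   S:J2:J1:imm6:imm11 shifted left by one, i.e. a 21-bit signed byte
   offset, which is why the limits above are +0xffffe/-0x100000
   (roughly +/-1MB).  */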
7682 if (globals->use_rel)
7684 bfd_vma S = (upper_insn & 0x0400) >> 10;
7685 bfd_vma upper = (upper_insn & 0x003f);
7686 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7687 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7688 bfd_vma lower = (lower_insn & 0x07ff);
7693 upper -= 0x0100; /* Sign extend. */
7695 addend = (upper << 12) | (lower << 1);
7696 signed_addend = addend;
7699 /* Handle calls via the PLT. */
7700 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7702 value = (splt->output_section->vma
7703 + splt->output_offset
7705 /* Target the Thumb stub before the ARM PLT entry. */
7706 value -= PLT_THUMB_STUB_SIZE;
7707 *unresolved_reloc_p = FALSE;
7710 /* ??? Should handle interworking? GCC might someday try to
7711 use this for tail calls. */
7713 relocation = value + signed_addend;
7714 relocation -= (input_section->output_section->vma
7715 + input_section->output_offset
7717 signed_check = (bfd_signed_vma) relocation;
7719 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7722 /* Put RELOCATION back into the insn. */
7724 bfd_vma S = (relocation & 0x00100000) >> 20;
7725 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7726 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7727 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7728 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7730 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7731 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7734 /* Put the relocated value back in the object file: */
7735 bfd_put_16 (input_bfd, upper_insn, hit_data);
7736 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7738 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7741 case R_ARM_THM_JUMP11:
7742 case R_ARM_THM_JUMP8:
7743 case R_ARM_THM_JUMP6:
7744 /* Thumb B (branch) instruction. */
7746 bfd_signed_vma relocation;
7747 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7748 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7749 bfd_signed_vma signed_check;
7751 /* CBZ cannot jump backward. */
7752 if (r_type == R_ARM_THM_JUMP6)
7753 reloc_signed_min = 0;
7755 if (globals->use_rel)
7757 /* Need to refetch addend. */
7758 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7759 if (addend & ((howto->src_mask + 1) >> 1))
7762 signed_addend &= ~ howto->src_mask;
7763 signed_addend |= addend;
7766 signed_addend = addend;
7767 /* The value in the insn has been right shifted. We need to
7768 undo this, so that we can perform the address calculation
7769 in terms of bytes. */
7770 signed_addend <<= howto->rightshift;
7772 relocation = value + signed_addend;
7774 relocation -= (input_section->output_section->vma
7775 + input_section->output_offset
7778 relocation >>= howto->rightshift;
7779 signed_check = relocation;
7781 if (r_type == R_ARM_THM_JUMP6)
7782 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7784 relocation &= howto->dst_mask;
7785 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7787 bfd_put_16 (input_bfd, relocation, hit_data);
7789 /* Assumes two's complement. */
7790 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7791 return bfd_reloc_overflow;
7793 return bfd_reloc_ok;
7796 case R_ARM_ALU_PCREL7_0:
7797 case R_ARM_ALU_PCREL15_8:
7798 case R_ARM_ALU_PCREL23_15:
7803 insn = bfd_get_32 (input_bfd, hit_data);
7804 if (globals->use_rel)
7806 /* Extract the addend. */
7807 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7808 signed_addend = addend;
7810 relocation = value + signed_addend;
7812 relocation -= (input_section->output_section->vma
7813 + input_section->output_offset
7815 insn = (insn & ~0xfff)
7816 | ((howto->bitpos << 7) & 0xf00)
7817 | ((relocation >> howto->bitpos) & 0xff);
7818 bfd_put_32 (input_bfd, insn, hit_data);
7820 return bfd_reloc_ok;
7822 case R_ARM_GNU_VTINHERIT:
7823 case R_ARM_GNU_VTENTRY:
7824 return bfd_reloc_ok;
7826 case R_ARM_GOTOFF32:
7827 /* Relocation is relative to the start of the
7828 global offset table. */
7830 BFD_ASSERT (sgot != NULL);
7832 return bfd_reloc_notsupported;
7834 /* If we are addressing a Thumb function, we need to adjust the
7835 address by one, so that attempts to call the function pointer will
7836 correctly interpret it as Thumb code. */
7837 if (sym_flags == STT_ARM_TFUNC)
7840 /* Note that sgot->output_offset is not involved in this
7841 calculation. We always want the start of .got. If we
7842 define _GLOBAL_OFFSET_TABLE in a different way, as is
7843 permitted by the ABI, we might have to change this calculation. */
7845 value -= sgot->output_section->vma;
7846 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7847 contents, rel->r_offset, value,
7851 /* Use global offset table as symbol value. */
7852 BFD_ASSERT (sgot != NULL);
7855 return bfd_reloc_notsupported;
7857 *unresolved_reloc_p = FALSE;
7858 value = sgot->output_section->vma;
7859 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7860 contents, rel->r_offset, value,
7864 case R_ARM_GOT_PREL:
7865 /* Relocation is to the entry for this symbol in the
7866 global offset table. */
7868 return bfd_reloc_notsupported;
7875 off = h->got.offset;
7876 BFD_ASSERT (off != (bfd_vma) -1);
7877 dyn = globals->root.dynamic_sections_created;
7879 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7881 && SYMBOL_REFERENCES_LOCAL (info, h))
7882 || (ELF_ST_VISIBILITY (h->other)
7883 && h->root.type == bfd_link_hash_undefweak))
7885 /* This is actually a static link, or it is a -Bsymbolic link
7886 and the symbol is defined locally. We must initialize this
7887 entry in the global offset table. Since the offset must
7888 always be a multiple of 4, we use the least significant bit
7889 to record whether we have initialized it already.
7891 When doing a dynamic link, we create a .rel(a).got relocation
7892 entry to initialize the value. This is done in the
7893 finish_dynamic_symbol routine. */
7898 /* If we are addressing a Thumb function, we need to
7899 adjust the address by one, so that attempts to
7900 call the function pointer will correctly
7901 interpret it as Thumb code. */
7902 if (sym_flags == STT_ARM_TFUNC)
7905 bfd_put_32 (output_bfd, value, sgot->contents + off);
7910 *unresolved_reloc_p = FALSE;
7912 value = sgot->output_offset + off;
7918 BFD_ASSERT (local_got_offsets != NULL &&
7919 local_got_offsets[r_symndx] != (bfd_vma) -1);
7921 off = local_got_offsets[r_symndx];
7923 /* The offset must always be a multiple of 4. We use the
7924 least significant bit to record whether we have already
7925 generated the necessary reloc. */
7930 /* If we are addressing a Thumb function, we need to
7931 adjust the address by one, so that attempts to
7932 call the function pointer will correctly
7933 interpret it as Thumb code. */
7934 if (sym_flags == STT_ARM_TFUNC)
7937 if (globals->use_rel)
7938 bfd_put_32 (output_bfd, value, sgot->contents + off);
7943 Elf_Internal_Rela outrel;
7946 srelgot = (bfd_get_section_by_name
7947 (dynobj, RELOC_SECTION (globals, ".got")));
7948 BFD_ASSERT (srelgot != NULL);
7950 outrel.r_addend = addend + value;
7951 outrel.r_offset = (sgot->output_section->vma
7952 + sgot->output_offset
7954 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7955 loc = srelgot->contents;
7956 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7957 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7960 local_got_offsets[r_symndx] |= 1;
7963 value = sgot->output_offset + off;
7965 if (r_type != R_ARM_GOT32)
7966 value += sgot->output_section->vma;
7968 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7969 contents, rel->r_offset, value,
7972 case R_ARM_TLS_LDO32:
7973 value = value - dtpoff_base (info);
7975 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7976 contents, rel->r_offset, value,
7979 case R_ARM_TLS_LDM32:
7983 if (globals->sgot == NULL)
7986 off = globals->tls_ldm_got.offset;
7992 /* If we don't know the module number, create a relocation for it. */
7996 Elf_Internal_Rela outrel;
7999 if (globals->srelgot == NULL)
8002 outrel.r_addend = 0;
8003 outrel.r_offset = (globals->sgot->output_section->vma
8004 + globals->sgot->output_offset + off);
8005 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
8007 if (globals->use_rel)
8008 bfd_put_32 (output_bfd, outrel.r_addend,
8009 globals->sgot->contents + off);
8011 loc = globals->srelgot->contents;
8012 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
8013 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8016 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
8018 globals->tls_ldm_got.offset |= 1;
8021 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8022 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8024 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8025 contents, rel->r_offset, value,
8029 case R_ARM_TLS_GD32:
8030 case R_ARM_TLS_IE32:
8036 if (globals->sgot == NULL)
8043 dyn = globals->root.dynamic_sections_created;
8044 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
8046 || !SYMBOL_REFERENCES_LOCAL (info, h)))
8048 *unresolved_reloc_p = FALSE;
8051 off = h->got.offset;
8052 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
8056 if (local_got_offsets == NULL)
8058 off = local_got_offsets[r_symndx];
8059 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
8062 if (tls_type == GOT_UNKNOWN)
8069 bfd_boolean need_relocs = FALSE;
8070 Elf_Internal_Rela outrel;
8071 bfd_byte *loc = NULL;
8074 /* The GOT entries have not been initialized yet. Do it
8075 now, and emit any relocations. If both an IE GOT and a
8076 GD GOT are necessary, we emit the GD first. */
8078 if ((info->shared || indx != 0)
8080 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8081 || h->root.type != bfd_link_hash_undefweak))
8084 if (globals->srelgot == NULL)
8086 loc = globals->srelgot->contents;
8087 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
8090 if (tls_type & GOT_TLS_GD)
8094 outrel.r_addend = 0;
8095 outrel.r_offset = (globals->sgot->output_section->vma
8096 + globals->sgot->output_offset
8098 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8100 if (globals->use_rel)
8101 bfd_put_32 (output_bfd, outrel.r_addend,
8102 globals->sgot->contents + cur_off);
8104 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8105 globals->srelgot->reloc_count++;
8106 loc += RELOC_SIZE (globals);
8109 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8110 globals->sgot->contents + cur_off + 4);
8113 outrel.r_addend = 0;
8114 outrel.r_info = ELF32_R_INFO (indx,
8115 R_ARM_TLS_DTPOFF32);
8116 outrel.r_offset += 4;
8118 if (globals->use_rel)
8119 bfd_put_32 (output_bfd, outrel.r_addend,
8120 globals->sgot->contents + cur_off + 4);
8123 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8124 globals->srelgot->reloc_count++;
8125 loc += RELOC_SIZE (globals);
8130 /* If we are not emitting relocations for a
8131 general dynamic reference, then we must be in a
8132 static link or an executable link with the
8133 symbol binding locally. Mark it as belonging
8134 to module 1, the executable. */
8135 bfd_put_32 (output_bfd, 1,
8136 globals->sgot->contents + cur_off);
8137 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8138 globals->sgot->contents + cur_off + 4);
8144 if (tls_type & GOT_TLS_IE)
8149 outrel.r_addend = value - dtpoff_base (info);
8151 outrel.r_addend = 0;
8152 outrel.r_offset = (globals->sgot->output_section->vma
8153 + globals->sgot->output_offset
8155 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8157 if (globals->use_rel)
8158 bfd_put_32 (output_bfd, outrel.r_addend,
8159 globals->sgot->contents + cur_off);
8161 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8162 globals->srelgot->reloc_count++;
8163 loc += RELOC_SIZE (globals);
8166 bfd_put_32 (output_bfd, tpoff (info, value),
8167 globals->sgot->contents + cur_off);
8174 local_got_offsets[r_symndx] |= 1;
8177 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8179 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8180 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8182 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8183 contents, rel->r_offset, value,
8187 case R_ARM_TLS_LE32:
8190 (*_bfd_error_handler)
8191 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8192 input_bfd, input_section,
8193 (long) rel->r_offset, howto->name);
8194 return (bfd_reloc_status_type) FALSE;
8197 value = tpoff (info, value);
8199 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8200 contents, rel->r_offset, value,
8204 if (globals->fix_v4bx)
8206 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8208 /* Ensure that we have a BX instruction. */
8209 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8211 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8213 /* Branch to veneer. */
8215 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8216 glue_addr -= input_section->output_section->vma
8217 + input_section->output_offset
8218 + rel->r_offset + 8;
8219 insn = (insn & 0xf0000000) | 0x0a000000
8220 | ((glue_addr >> 2) & 0x00ffffff);
8224 /* Preserve Rm (lowest four bits) and the condition code
8225 (highest four bits). Other bits encode MOV PC,Rm. */
8226 insn = (insn & 0xf000000f) | 0x01a0f000;
8229 bfd_put_32 (input_bfd, insn, hit_data);
8231 return bfd_reloc_ok;
8233 case R_ARM_MOVW_ABS_NC:
8234 case R_ARM_MOVT_ABS:
8235 case R_ARM_MOVW_PREL_NC:
8236 case R_ARM_MOVT_PREL:
8237 /* Until we properly support segment-base-relative addressing then
8238 we assume the segment base to be zero, as for the group relocations.
8239 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8240 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8241 case R_ARM_MOVW_BREL_NC:
8242 case R_ARM_MOVW_BREL:
8243 case R_ARM_MOVT_BREL:
8245 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8247 if (globals->use_rel)
8249 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8250 signed_addend = (addend ^ 0x8000) - 0x8000;
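/* An ARM MOVW/MOVT splits its 16-bit immediate into imm4 (bits 16-19)
   and imm12 (bits 0-11), which is what the extraction above undoes;
   the xor/subtract idiom then sign-extends the 16-bit result so that
   small negative addends survive the round trip.  */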
8253 value += signed_addend;
8255 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8256 value -= (input_section->output_section->vma
8257 + input_section->output_offset + rel->r_offset);
8259 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8260 return bfd_reloc_overflow;
8262 if (sym_flags == STT_ARM_TFUNC)
8265 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8266 || r_type == R_ARM_MOVT_BREL)
8270 insn |= value & 0xfff;
8271 insn |= (value & 0xf000) << 4;
8272 bfd_put_32 (input_bfd, insn, hit_data);
8274 return bfd_reloc_ok;
8276 case R_ARM_THM_MOVW_ABS_NC:
8277 case R_ARM_THM_MOVT_ABS:
8278 case R_ARM_THM_MOVW_PREL_NC:
8279 case R_ARM_THM_MOVT_PREL:
8280 /* Until we properly support segment-base-relative addressing then
8281 we assume the segment base to be zero, as for the above relocations.
8282 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8283 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8284 as R_ARM_THM_MOVT_ABS. */
8285 case R_ARM_THM_MOVW_BREL_NC:
8286 case R_ARM_THM_MOVW_BREL:
8287 case R_ARM_THM_MOVT_BREL:
8291 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8292 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8294 if (globals->use_rel)
8296 addend = ((insn >> 4) & 0xf000)
8297 | ((insn >> 15) & 0x0800)
8298 | ((insn >> 4) & 0x0700)
8300 signed_addend = (addend ^ 0x8000) - 0x8000;
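/* The Thumb-2 forms scatter the 16-bit immediate as imm4 (bits 16-19),
   i (bit 26), imm3 (bits 12-14) and imm8 (bits 0-7) of the combined
   32-bit encoding, matching the extraction above; as in the ARM case
   the xor/subtract idiom sign-extends the reassembled addend.  */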
8303 value += signed_addend;
8305 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8306 value -= (input_section->output_section->vma
8307 + input_section->output_offset + rel->r_offset);
8309 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8310 return bfd_reloc_overflow;
8312 if (sym_flags == STT_ARM_TFUNC)
8315 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8316 || r_type == R_ARM_THM_MOVT_BREL)
8320 insn |= (value & 0xf000) << 4;
8321 insn |= (value & 0x0800) << 15;
8322 insn |= (value & 0x0700) << 4;
8323 insn |= (value & 0x00ff);
8325 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8326 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8328 return bfd_reloc_ok;
8330 case R_ARM_ALU_PC_G0_NC:
8331 case R_ARM_ALU_PC_G1_NC:
8332 case R_ARM_ALU_PC_G0:
8333 case R_ARM_ALU_PC_G1:
8334 case R_ARM_ALU_PC_G2:
8335 case R_ARM_ALU_SB_G0_NC:
8336 case R_ARM_ALU_SB_G1_NC:
8337 case R_ARM_ALU_SB_G0:
8338 case R_ARM_ALU_SB_G1:
8339 case R_ARM_ALU_SB_G2:
8341 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8342 bfd_vma pc = input_section->output_section->vma
8343 + input_section->output_offset + rel->r_offset;
8344 /* sb should be the origin of the *segment* containing the symbol.
8345 It is not clear how to obtain this OS-dependent value, so we
8346 make an arbitrary choice of zero. */
8350 bfd_signed_vma signed_value;
8353 /* Determine which group of bits to select. */
8356 case R_ARM_ALU_PC_G0_NC:
8357 case R_ARM_ALU_PC_G0:
8358 case R_ARM_ALU_SB_G0_NC:
8359 case R_ARM_ALU_SB_G0:
8363 case R_ARM_ALU_PC_G1_NC:
8364 case R_ARM_ALU_PC_G1:
8365 case R_ARM_ALU_SB_G1_NC:
8366 case R_ARM_ALU_SB_G1:
8370 case R_ARM_ALU_PC_G2:
8371 case R_ARM_ALU_SB_G2:
8379 /* If REL, extract the addend from the insn. If RELA, it will
8380 have already been fetched for us. */
8381 if (globals->use_rel)
8384 bfd_vma constant = insn & 0xff;
8385 bfd_vma rotation = (insn & 0xf00) >> 8;
8388 signed_addend = constant;
8391 /* Compensate for the fact that in the instruction, the
8392 rotation is stored in multiples of 2 bits. */
8395 /* Rotate "constant" right by "rotation" bits. */
8396 signed_addend = (constant >> rotation) |
8397 (constant << (8 * sizeof (bfd_vma) - rotation));
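/* This is the standard ARM "modified immediate" operand: an 8-bit
   constant rotated right by twice the 4-bit rotation field.  For
   example, a constant of 0x48 with a rotation field of 13 decodes as
   0x48 rotated right by 26 bits, i.e. 0x1200.  */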
8400 /* Determine if the instruction is an ADD or a SUB.
8401 (For REL, this determines the sign of the addend.) */
8402 negative = identify_add_or_sub (insn);
8405 (*_bfd_error_handler)
8406 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8407 input_bfd, input_section,
8408 (long) rel->r_offset, howto->name);
8409 return bfd_reloc_overflow;
8412 signed_addend *= negative;
8415 /* Compute the value (X) to go in the place. */
8416 if (r_type == R_ARM_ALU_PC_G0_NC
8417 || r_type == R_ARM_ALU_PC_G1_NC
8418 || r_type == R_ARM_ALU_PC_G0
8419 || r_type == R_ARM_ALU_PC_G1
8420 || r_type == R_ARM_ALU_PC_G2)
8422 signed_value = value - pc + signed_addend;
8424 /* Section base relative. */
8425 signed_value = value - sb + signed_addend;
8427 /* If the target symbol is a Thumb function, then set the
8428 Thumb bit in the address. */
8429 if (sym_flags == STT_ARM_TFUNC)
8432 /* Calculate the value of the relevant G_n, in encoded
8433 constant-with-rotation format. */
8434 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8437 /* Check for overflow if required. */
8438 if ((r_type == R_ARM_ALU_PC_G0
8439 || r_type == R_ARM_ALU_PC_G1
8440 || r_type == R_ARM_ALU_PC_G2
8441 || r_type == R_ARM_ALU_SB_G0
8442 || r_type == R_ARM_ALU_SB_G1
8443 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8445 (*_bfd_error_handler)
8446 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8447 input_bfd, input_section,
8448 (long) rel->r_offset, abs (signed_value), howto->name);
8449 return bfd_reloc_overflow;
8452 /* Mask out the value and the ADD/SUB part of the opcode; take care
8453 not to destroy the S bit. */
8456 /* Set the opcode according to whether the value to go in the
8457 place is negative. */
8458 if (signed_value < 0)
8463 /* Encode the offset. */
8466 bfd_put_32 (input_bfd, insn, hit_data);
8468 return bfd_reloc_ok;
8470 case R_ARM_LDR_PC_G0:
8471 case R_ARM_LDR_PC_G1:
8472 case R_ARM_LDR_PC_G2:
8473 case R_ARM_LDR_SB_G0:
8474 case R_ARM_LDR_SB_G1:
8475 case R_ARM_LDR_SB_G2:
8477 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8478 bfd_vma pc = input_section->output_section->vma
8479 + input_section->output_offset + rel->r_offset;
8480 bfd_vma sb = 0; /* See note above. */
8482 bfd_signed_vma signed_value;
8485 /* Determine which groups of bits to calculate. */
8488 case R_ARM_LDR_PC_G0:
8489 case R_ARM_LDR_SB_G0:
8493 case R_ARM_LDR_PC_G1:
8494 case R_ARM_LDR_SB_G1:
8498 case R_ARM_LDR_PC_G2:
8499 case R_ARM_LDR_SB_G2:
8507 /* If REL, extract the addend from the insn. If RELA, it will
8508 have already been fetched for us. */
8509 if (globals->use_rel)
8511 int negative = (insn & (1 << 23)) ? 1 : -1;
8512 signed_addend = negative * (insn & 0xfff);
8515 /* Compute the value (X) to go in the place. */
8516 if (r_type == R_ARM_LDR_PC_G0
8517 || r_type == R_ARM_LDR_PC_G1
8518 || r_type == R_ARM_LDR_PC_G2)
8520 signed_value = value - pc + signed_addend;
8522 /* Section base relative. */
8523 signed_value = value - sb + signed_addend;
8525 /* Calculate the value of the relevant G_{n-1} to obtain
8526 the residual at that stage. */
8527 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8529 /* Check for overflow. */
8530 if (residual >= 0x1000)
8532 (*_bfd_error_handler)
8533 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8534 input_bfd, input_section,
8535 (long) rel->r_offset, abs (signed_value), howto->name);
8536 return bfd_reloc_overflow;
8539 /* Mask out the value and U bit. */
8542 /* Set the U bit if the value to go in the place is non-negative. */
8543 if (signed_value >= 0)
8546 /* Encode the offset. */
8549 bfd_put_32 (input_bfd, insn, hit_data);
8551 return bfd_reloc_ok;
8553 case R_ARM_LDRS_PC_G0:
8554 case R_ARM_LDRS_PC_G1:
8555 case R_ARM_LDRS_PC_G2:
8556 case R_ARM_LDRS_SB_G0:
8557 case R_ARM_LDRS_SB_G1:
8558 case R_ARM_LDRS_SB_G2:
8560 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8561 bfd_vma pc = input_section->output_section->vma
8562 + input_section->output_offset + rel->r_offset;
8563 bfd_vma sb = 0; /* See note above. */
8565 bfd_signed_vma signed_value;
8568 /* Determine which groups of bits to calculate. */
8571 case R_ARM_LDRS_PC_G0:
8572 case R_ARM_LDRS_SB_G0:
8576 case R_ARM_LDRS_PC_G1:
8577 case R_ARM_LDRS_SB_G1:
8581 case R_ARM_LDRS_PC_G2:
8582 case R_ARM_LDRS_SB_G2:
8590 /* If REL, extract the addend from the insn. If RELA, it will
8591 have already been fetched for us. */
8592 if (globals->use_rel)
8594 int negative = (insn & (1 << 23)) ? 1 : -1;
8595 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8598 /* Compute the value (X) to go in the place. */
8599 if (r_type == R_ARM_LDRS_PC_G0
8600 || r_type == R_ARM_LDRS_PC_G1
8601 || r_type == R_ARM_LDRS_PC_G2)
8603 signed_value = value - pc + signed_addend;
8605 /* Section base relative. */
8606 signed_value = value - sb + signed_addend;
8608 /* Calculate the value of the relevant G_{n-1} to obtain
8609 the residual at that stage. */
8610 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8612 /* Check for overflow. */
8613 if (residual >= 0x100)
8615 (*_bfd_error_handler)
8616 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8617 input_bfd, input_section,
8618 (long) rel->r_offset, abs (signed_value), howto->name);
8619 return bfd_reloc_overflow;
8622 /* Mask out the value and U bit. */
8625 /* Set the U bit if the value to go in the place is non-negative. */
8626 if (signed_value >= 0)
8629 /* Encode the offset. */
8630 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8632 bfd_put_32 (input_bfd, insn, hit_data);
8634 return bfd_reloc_ok;
8636 case R_ARM_LDC_PC_G0:
8637 case R_ARM_LDC_PC_G1:
8638 case R_ARM_LDC_PC_G2:
8639 case R_ARM_LDC_SB_G0:
8640 case R_ARM_LDC_SB_G1:
8641 case R_ARM_LDC_SB_G2:
8643 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8644 bfd_vma pc = input_section->output_section->vma
8645 + input_section->output_offset + rel->r_offset;
8646 bfd_vma sb = 0; /* See note above. */
8648 bfd_signed_vma signed_value;
8651 /* Determine which groups of bits to calculate. */
8654 case R_ARM_LDC_PC_G0:
8655 case R_ARM_LDC_SB_G0:
8659 case R_ARM_LDC_PC_G1:
8660 case R_ARM_LDC_SB_G1:
8664 case R_ARM_LDC_PC_G2:
8665 case R_ARM_LDC_SB_G2:
8673 /* If REL, extract the addend from the insn. If RELA, it will
8674 have already been fetched for us. */
8675 if (globals->use_rel)
8677 int negative = (insn & (1 << 23)) ? 1 : -1;
8678 signed_addend = negative * ((insn & 0xff) << 2);
8681 /* Compute the value (X) to go in the place. */
8682 if (r_type == R_ARM_LDC_PC_G0
8683 || r_type == R_ARM_LDC_PC_G1
8684 || r_type == R_ARM_LDC_PC_G2)
8686 signed_value = value - pc + signed_addend;
8688 /* Section base relative. */
8689 signed_value = value - sb + signed_addend;
8691 /* Calculate the value of the relevant G_{n-1} to obtain
8692 the residual at that stage. */
8693 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8695 /* Check for overflow. (The absolute value to go in the place must be
8696 divisible by four and, after having been divided by four, must
8697 fit in eight bits.) */
8698 if ((residual & 0x3) != 0 || residual >= 0x400)
8700 (*_bfd_error_handler)
8701 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8702 input_bfd, input_section,
8703 (long) rel->r_offset, abs (signed_value), howto->name);
8704 return bfd_reloc_overflow;
8707 /* Mask out the value and U bit. */
8710 /* Set the U bit if the value to go in the place is non-negative. */
8711 if (signed_value >= 0)
8714 /* Encode the offset. */
8715 insn |= residual >> 2;
8717 bfd_put_32 (input_bfd, insn, hit_data);
8719 return bfd_reloc_ok;
8722 return bfd_reloc_notsupported;
8726 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8728 arm_add_to_rel (bfd * abfd,
8730 reloc_howto_type * howto,
8731 bfd_signed_vma increment)
8733 bfd_signed_vma addend;
8735 if (howto->type == R_ARM_THM_CALL
8736 || howto->type == R_ARM_THM_JUMP24)
8738 int upper_insn, lower_insn;
8741 upper_insn = bfd_get_16 (abfd, address);
8742 lower_insn = bfd_get_16 (abfd, address + 2);
8743 upper = upper_insn & 0x7ff;
8744 lower = lower_insn & 0x7ff;
8746 addend = (upper << 12) | (lower << 1);
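/* This reconstructs the addend from the original Thumb BL encoding:
   two 11-bit halves giving a halfword-scaled 22-bit offset.  Note that
   the Thumb-2 S/J1/J2 extension bits are not touched here, so only the
   low 22 bits of the offset are adjusted.  */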
8747 addend += increment;
8750 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8751 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8753 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8754 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8760 contents = bfd_get_32 (abfd, address);
8762 /* Get the (signed) value from the instruction. */
8763 addend = contents & howto->src_mask;
8764 if (addend & ((howto->src_mask + 1) >> 1))
8766 bfd_signed_vma mask;
8769 mask &= ~ howto->src_mask;
8773 /* Add in the increment (which is a byte value). */
8774 switch (howto->type)
8777 addend += increment;
8784 addend <<= howto->size;
8785 addend += increment;
8787 /* Should we check for overflow here ? */
8789 /* Drop any undesired bits. */
8790 addend >>= howto->rightshift;
8794 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8796 bfd_put_32 (abfd, contents, address);
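/* Illustrative sketch only, kept out of the build: the halfword split
   performed by arm_add_to_rel above for R_ARM_THM_CALL and
   R_ARM_THM_JUMP24.  The function name is hypothetical and the values
   are arbitrary; only the bit manipulation is taken from the code
   above.  */
#if 0
static void
example_thumb_addend_round_trip (void)
{
  unsigned int upper_insn = 0xf000;   /* First halfword of a Thumb BL.  */
  unsigned int lower_insn = 0xf800;   /* Second halfword.  */
  int addend, increment = 0x100;      /* Increment is in bytes.  */

  /* Recover the byte addend: the low 11 bits of the first halfword
     supply bits 22:12 and the low 11 bits of the second halfword
     supply bits 11:1 (Thumb branch targets are halfword aligned).  */
  addend = ((upper_insn & 0x7ff) << 12) | ((lower_insn & 0x7ff) << 1);

  /* Adjust, then drop back to halfword units before re-encoding.  */
  addend += increment;
  addend >>= 1;

  upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
  lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
}
#endif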
8800 #define IS_ARM_TLS_RELOC(R_TYPE) \
8801 ((R_TYPE) == R_ARM_TLS_GD32 \
8802 || (R_TYPE) == R_ARM_TLS_LDO32 \
8803 || (R_TYPE) == R_ARM_TLS_LDM32 \
8804 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8805 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8806 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8807 || (R_TYPE) == R_ARM_TLS_LE32 \
8808 || (R_TYPE) == R_ARM_TLS_IE32)
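/* Illustrative sketch only, kept out of the build: the relocation loop
   below uses IS_ARM_TLS_RELOC to catch TLS relocations applied to
   non-TLS symbols and vice versa.  The helper name is hypothetical.  */
#if 0
static bfd_boolean
example_tls_reloc_matches_symbol (int r_type, int sym_type)
{
  /* A TLS relocation must reference an STT_TLS symbol, and an STT_TLS
     symbol must only be referenced by TLS relocations.  */
  return IS_ARM_TLS_RELOC (r_type) == (sym_type == STT_TLS);
}
#endif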
8810 /* Relocate an ARM ELF section. */
8813 elf32_arm_relocate_section (bfd * output_bfd,
8814 struct bfd_link_info * info,
8816 asection * input_section,
8817 bfd_byte * contents,
8818 Elf_Internal_Rela * relocs,
8819 Elf_Internal_Sym * local_syms,
8820 asection ** local_sections)
8822 Elf_Internal_Shdr *symtab_hdr;
8823 struct elf_link_hash_entry **sym_hashes;
8824 Elf_Internal_Rela *rel;
8825 Elf_Internal_Rela *relend;
8827 struct elf32_arm_link_hash_table * globals;
8829 globals = elf32_arm_hash_table (info);
8830 if (globals == NULL)
8833 symtab_hdr = & elf_symtab_hdr (input_bfd);
8834 sym_hashes = elf_sym_hashes (input_bfd);
8837 relend = relocs + input_section->reloc_count;
8838 for (; rel < relend; rel++)
8841 reloc_howto_type * howto;
8842 unsigned long r_symndx;
8843 Elf_Internal_Sym * sym;
8845 struct elf_link_hash_entry * h;
8847 bfd_reloc_status_type r;
8850 bfd_boolean unresolved_reloc = FALSE;
8851 char *error_message = NULL;
8853 r_symndx = ELF32_R_SYM (rel->r_info);
8854 r_type = ELF32_R_TYPE (rel->r_info);
8855 r_type = arm_real_reloc_type (globals, r_type);
8857 if ( r_type == R_ARM_GNU_VTENTRY
8858 || r_type == R_ARM_GNU_VTINHERIT)
8861 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8862 howto = bfd_reloc.howto;
8868 if (r_symndx < symtab_hdr->sh_info)
8870 sym = local_syms + r_symndx;
8871 sym_type = ELF32_ST_TYPE (sym->st_info);
8872 sec = local_sections[r_symndx];
8874 /* An object file might have a reference to a local
8875 undefined symbol. This is a daft object file, but we
8876 should at least do something about it. V4BX & NONE
8877 relocations do not use the symbol and are explicitly
8878 allowed to use the undefined symbol, so allow those. */
8879 if (r_type != R_ARM_V4BX
8880 && r_type != R_ARM_NONE
8881 && bfd_is_und_section (sec)
8882 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8884 if (!info->callbacks->undefined_symbol
8885 (info, bfd_elf_string_from_elf_section
8886 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8887 input_bfd, input_section,
8888 rel->r_offset, TRUE))
8892 if (globals->use_rel)
8894 relocation = (sec->output_section->vma
8895 + sec->output_offset
8897 if (!info->relocatable
8898 && (sec->flags & SEC_MERGE)
8899 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8902 bfd_vma addend, value;
8906 case R_ARM_MOVW_ABS_NC:
8907 case R_ARM_MOVT_ABS:
8908 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8909 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8910 addend = (addend ^ 0x8000) - 0x8000;
8913 case R_ARM_THM_MOVW_ABS_NC:
8914 case R_ARM_THM_MOVT_ABS:
8915 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8917 value |= bfd_get_16 (input_bfd,
8918 contents + rel->r_offset + 2);
8919 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8920 | ((value & 0x04000000) >> 15);
8921 addend = (addend ^ 0x8000) - 0x8000;
8925 if (howto->rightshift
8926 || (howto->src_mask & (howto->src_mask + 1)))
8928 (*_bfd_error_handler)
8929 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8930 input_bfd, input_section,
8931 (long) rel->r_offset, howto->name);
8935 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8937 /* Get the (signed) value from the instruction. */
8938 addend = value & howto->src_mask;
8939 if (addend & ((howto->src_mask + 1) >> 1))
8941 bfd_signed_vma mask;
8944 mask &= ~ howto->src_mask;
8952 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8954 addend += msec->output_section->vma + msec->output_offset;
8956 /* Cases here must match those in the preceding
8957 switch statement. */
8960 case R_ARM_MOVW_ABS_NC:
8961 case R_ARM_MOVT_ABS:
8962 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8964 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8967 case R_ARM_THM_MOVW_ABS_NC:
8968 case R_ARM_THM_MOVT_ABS:
8969 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8970 | (addend & 0xff) | ((addend & 0x0800) << 15);
8971 bfd_put_16 (input_bfd, value >> 16,
8972 contents + rel->r_offset);
8973 bfd_put_16 (input_bfd, value,
8974 contents + rel->r_offset + 2);
8978 value = (value & ~ howto->dst_mask)
8979 | (addend & howto->dst_mask);
8980 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8986 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8992 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8993 r_symndx, symtab_hdr, sym_hashes,
8995 unresolved_reloc, warned);
9000 if (sec != NULL && elf_discarded_section (sec))
9002 /* For relocs against symbols from removed linkonce sections,
9003 or sections discarded by a linker script, we just want the
9004 section contents zeroed. Avoid any special processing. */
9005 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
9011 if (info->relocatable)
9013 /* This is a relocatable link. We don't have to change
9014 anything, unless the reloc is against a section symbol,
9015 in which case we have to adjust according to where the
9016 section symbol winds up in the output section. */
9017 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
9019 if (globals->use_rel)
9020 arm_add_to_rel (input_bfd, contents + rel->r_offset,
9021 howto, (bfd_signed_vma) sec->output_offset);
9023 rel->r_addend += sec->output_offset;
9029 name = h->root.root.string;
9032 name = (bfd_elf_string_from_elf_section
9033 (input_bfd, symtab_hdr->sh_link, sym->st_name));
9034 if (name == NULL || *name == '\0')
9035 name = bfd_section_name (input_bfd, sec);
9039 && r_type != R_ARM_NONE
9041 || h->root.type == bfd_link_hash_defined
9042 || h->root.type == bfd_link_hash_defweak)
9043 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
9045 (*_bfd_error_handler)
9046 ((sym_type == STT_TLS
9047 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
9048 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
9051 (long) rel->r_offset,
9056 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
9057 input_section, contents, rel,
9058 relocation, info, sec, name,
9059 (h ? ELF_ST_TYPE (h->type) :
9060 ELF_ST_TYPE (sym->st_info)), h,
9061 &unresolved_reloc, &error_message);
9063 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
9064 because such sections are not SEC_ALLOC and thus ld.so will
9065 not process them. */
9066 if (unresolved_reloc
9067 && !((input_section->flags & SEC_DEBUGGING) != 0
9070 (*_bfd_error_handler)
9071 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
9074 (long) rel->r_offset,
9076 h->root.root.string);
9080 if (r != bfd_reloc_ok)
9084 case bfd_reloc_overflow:
9085 /* If the overflowing reloc was to an undefined symbol,
9086 we have already printed one error message and there
9087 is no point complaining again. */
9089 h->root.type != bfd_link_hash_undefined)
9090 && (!((*info->callbacks->reloc_overflow)
9091 (info, (h ? &h->root : NULL), name, howto->name,
9092 (bfd_vma) 0, input_bfd, input_section,
9097 case bfd_reloc_undefined:
9098 if (!((*info->callbacks->undefined_symbol)
9099 (info, name, input_bfd, input_section,
9100 rel->r_offset, TRUE)))
9104 case bfd_reloc_outofrange:
9105 error_message = _("out of range");
9108 case bfd_reloc_notsupported:
9109 error_message = _("unsupported relocation");
9112 case bfd_reloc_dangerous:
9113 /* error_message should already be set. */
9117 error_message = _("unknown error");
9121 BFD_ASSERT (error_message != NULL);
9122 if (!((*info->callbacks->reloc_dangerous)
9123 (info, error_message, input_bfd, input_section,
9134 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
9135 adds the edit to the start of the list. (The list must be built in order of
9136 ascending TINDEX: the function's callers are primarily responsible for
9137 maintaining that condition). */
9140 add_unwind_table_edit (arm_unwind_table_edit **head,
9141 arm_unwind_table_edit **tail,
9142 arm_unwind_edit_type type,
9143 asection *linked_section,
9144 unsigned int tindex)
9146 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9147 xmalloc (sizeof (arm_unwind_table_edit));
9149 new_edit->type = type;
9150 new_edit->linked_section = linked_section;
9151 new_edit->index = tindex;
9155 new_edit->next = NULL;
9158 (*tail)->next = new_edit;
9167 new_edit->next = *head;
9176 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9178 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
9180 adjust_exidx_size(asection *exidx_sec, int adjust)
9184 if (!exidx_sec->rawsize)
9185 exidx_sec->rawsize = exidx_sec->size;
9187 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9188 out_sec = exidx_sec->output_section;
9189 /* Adjust size of output section. */
9190 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9193 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9195 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
9197 struct _arm_elf_section_data *exidx_arm_data;
9199 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9200 add_unwind_table_edit (
9201 &exidx_arm_data->u.exidx.unwind_edit_list,
9202 &exidx_arm_data->u.exidx.unwind_edit_tail,
9203 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9205 adjust_exidx_size(exidx_sec, 8);
9208 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9209 made to those tables, such that:
9211 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9212 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9213 codes which have been inlined into the index).
9215 The edits are applied when the tables are written
9216 (in elf32_arm_write_section).
9220 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9221 unsigned int num_text_sections,
9222 struct bfd_link_info *info)
9225 unsigned int last_second_word = 0, i;
9226 asection *last_exidx_sec = NULL;
9227 asection *last_text_sec = NULL;
9228 int last_unwind_type = -1;
9230 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9232 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9236 for (sec = inp->sections; sec != NULL; sec = sec->next)
9238 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9239 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9241 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9244 if (elf_sec->linked_to)
9246 Elf_Internal_Shdr *linked_hdr
9247 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9248 struct _arm_elf_section_data *linked_sec_arm_data
9249 = get_arm_elf_section_data (linked_hdr->bfd_section);
9251 if (linked_sec_arm_data == NULL)
9254 /* Link this .ARM.exidx section back from the text section it
9256 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9261 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
9262 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9263 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
9265 for (i = 0; i < num_text_sections; i++)
9267 asection *sec = text_section_order[i];
9268 asection *exidx_sec;
9269 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9270 struct _arm_elf_section_data *exidx_arm_data;
9271 bfd_byte *contents = NULL;
9272 int deleted_exidx_bytes = 0;
9274 arm_unwind_table_edit *unwind_edit_head = NULL;
9275 arm_unwind_table_edit *unwind_edit_tail = NULL;
9276 Elf_Internal_Shdr *hdr;
9279 if (arm_data == NULL)
9282 exidx_sec = arm_data->u.text.arm_exidx_sec;
9283 if (exidx_sec == NULL)
9285 /* Section has no unwind data. */
9286 if (last_unwind_type == 0 || !last_exidx_sec)
9289 /* Ignore zero sized sections. */
9293 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9294 last_unwind_type = 0;
9298 /* Skip /DISCARD/ sections. */
9299 if (bfd_is_abs_section (exidx_sec->output_section))
9302 hdr = &elf_section_data (exidx_sec)->this_hdr;
9303 if (hdr->sh_type != SHT_ARM_EXIDX)
9306 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9307 if (exidx_arm_data == NULL)
9310 ibfd = exidx_sec->owner;
9312 if (hdr->contents != NULL)
9313 contents = hdr->contents;
9314 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9318 for (j = 0; j < hdr->sh_size; j += 8)
9320 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9324 /* An EXIDX_CANTUNWIND entry. */
9325 if (second_word == 1)
9327 if (last_unwind_type == 0)
9331 /* Inlined unwinding data. Merge if equal to previous. */
9332 else if ((second_word & 0x80000000) != 0)
9334 if (last_second_word == second_word && last_unwind_type == 1)
9337 last_second_word = second_word;
9339 /* Normal table entry. In theory we could merge these too,
9340 but duplicate entries are likely to be much less common. */
9346 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9347 DELETE_EXIDX_ENTRY, NULL, j / 8);
9349 deleted_exidx_bytes += 8;
9352 last_unwind_type = unwind_type;
9355 /* Free contents if we allocated it ourselves. */
9356 if (contents != hdr->contents)
9359 /* Record edits to be applied later (in elf32_arm_write_section). */
9360 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9361 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9363 if (deleted_exidx_bytes > 0)
9364 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
9366 last_exidx_sec = exidx_sec;
9367 last_text_sec = sec;
9370 /* Add terminating CANTUNWIND entry. */
9371 if (last_exidx_sec && last_unwind_type != 0)
9372 insert_cantunwind_after(last_text_sec, last_exidx_sec);
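/* Illustrative sketch only, kept out of the build: how the loop in
   elf32_arm_fix_exidx_coverage above classifies the second word of each
   eight-byte .ARM.exidx entry.  The enum and function names are
   hypothetical.  */
#if 0
enum example_exidx_kind
{
  EXAMPLE_EXIDX_CANTUNWIND,   /* Second word is exactly 1.  */
  EXAMPLE_EXIDX_INLINE,       /* Bit 31 set: unwind opcodes inlined.  */
  EXAMPLE_EXIDX_EXTAB_REF     /* Otherwise: a reference into .ARM.extab.  */
};

static enum example_exidx_kind
example_classify_exidx_second_word (unsigned int second_word)
{
  if (second_word == 1)
    return EXAMPLE_EXIDX_CANTUNWIND;
  else if ((second_word & 0x80000000u) != 0)
    return EXAMPLE_EXIDX_INLINE;
  else
    return EXAMPLE_EXIDX_EXTAB_REF;
}
#endif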
9378 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9379 bfd *ibfd, const char *name)
9381 asection *sec, *osec;
9383 sec = bfd_get_section_by_name (ibfd, name);
9384 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9387 osec = sec->output_section;
9388 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9391 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9392 sec->output_offset, sec->size))
9399 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9401 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9402 asection *sec, *osec;
9404 if (globals == NULL)
9407 /* Invoke the regular ELF backend linker to do all the work. */
9408 if (!bfd_elf_final_link (abfd, info))
9411 /* Process stub sections (e.g. BE8 encoding, ...). */
9412 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
9414 for (i = 0; i < htab->top_id; i++) {
9415 sec = htab->stub_group[i].stub_sec;
9417 osec = sec->output_section;
9418 elf32_arm_write_section (abfd, info, sec, sec->contents);
9419 if (! bfd_set_section_contents (abfd, osec, sec->contents,
9420 sec->output_offset, sec->size))
9425 /* Write out any glue sections now that we have created all the
9427 if (globals->bfd_of_glue_owner != NULL)
9429 if (! elf32_arm_output_glue_section (info, abfd,
9430 globals->bfd_of_glue_owner,
9431 ARM2THUMB_GLUE_SECTION_NAME))
9434 if (! elf32_arm_output_glue_section (info, abfd,
9435 globals->bfd_of_glue_owner,
9436 THUMB2ARM_GLUE_SECTION_NAME))
9439 if (! elf32_arm_output_glue_section (info, abfd,
9440 globals->bfd_of_glue_owner,
9441 VFP11_ERRATUM_VENEER_SECTION_NAME))
9444 if (! elf32_arm_output_glue_section (info, abfd,
9445 globals->bfd_of_glue_owner,
9446 ARM_BX_GLUE_SECTION_NAME))
9453 /* Set the right machine number. */
9456 elf32_arm_object_p (bfd *abfd)
9460 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9462 if (mach != bfd_mach_arm_unknown)
9463 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9465 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9466 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9469 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9474 /* Function to keep ARM specific flags in the ELF header. */
9477 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9479 if (elf_flags_init (abfd)
9480 && elf_elfheader (abfd)->e_flags != flags)
9482 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9484 if (flags & EF_ARM_INTERWORK)
9485 (*_bfd_error_handler)
9486 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9490 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9496 elf_elfheader (abfd)->e_flags = flags;
9497 elf_flags_init (abfd) = TRUE;
9503 /* Copy backend specific data from one object module to another. */
9506 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9511 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9514 in_flags = elf_elfheader (ibfd)->e_flags;
9515 out_flags = elf_elfheader (obfd)->e_flags;
9517 if (elf_flags_init (obfd)
9518 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9519 && in_flags != out_flags)
9521 /* Cannot mix APCS26 and APCS32 code. */
9522 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9525 /* Cannot mix float APCS and non-float APCS code. */
9526 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9529 /* If the src and dest have different interworking flags
9530 then turn off the interworking bit. */
9531 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9533 if (out_flags & EF_ARM_INTERWORK)
9535 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9538 in_flags &= ~EF_ARM_INTERWORK;
9541 /* Likewise for PIC, though don't warn for this case. */
9542 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9543 in_flags &= ~EF_ARM_PIC;
9546 elf_elfheader (obfd)->e_flags = in_flags;
9547 elf_flags_init (obfd) = TRUE;
9549 /* Also copy the EI_OSABI field. */
9550 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9551 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9553 /* Copy object attributes. */
9554 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9559 /* Values for Tag_ABI_PCS_R9_use. */
9568 /* Values for Tag_ABI_PCS_RW_data. */
9571 AEABI_PCS_RW_data_absolute,
9572 AEABI_PCS_RW_data_PCrel,
9573 AEABI_PCS_RW_data_SBrel,
9574 AEABI_PCS_RW_data_unused
9577 /* Values for Tag_ABI_enum_size. */
9583 AEABI_enum_forced_wide
9586 /* Determine whether an object attribute tag takes an integer, a
9590 elf32_arm_obj_attrs_arg_type (int tag)
9592 if (tag == Tag_compatibility)
9593 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9594 else if (tag == Tag_nodefaults)
9595 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9596 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9597 return ATTR_TYPE_FLAG_STR_VAL;
9599 return ATTR_TYPE_FLAG_INT_VAL;
9601 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9604 /* The ABI defines that Tag_conformance should be emitted first, and that
9605 Tag_nodefaults should be second (if either is defined). This sets those
9606 two positions, and bumps up the position of all the remaining tags to
9609 elf32_arm_obj_attrs_order (int num)
9612 return Tag_conformance;
9614 return Tag_nodefaults;
9615 if ((num - 2) < Tag_nodefaults)
9617 if ((num - 1) < Tag_conformance)
9622 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9623 Returns -1 if no architecture could be read. */
9626 get_secondary_compatible_arch (bfd *abfd)
9628 obj_attribute *attr =
9629 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9631 /* Note: the tag and its argument below are uleb128 values, though
9632 currently-defined values fit in one byte for each. */
9634 && attr->s[0] == Tag_CPU_arch
9635 && (attr->s[1] & 128) != 128
9639 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9643 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9644 The tag is removed if ARCH is -1. */
9647 set_secondary_compatible_arch (bfd *abfd, int arch)
9649 obj_attribute *attr =
9650 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9658 /* Note: the tag and its argument below are uleb128 values, though
9659 currently-defined values fit in one byte for each. */
9661 attr->s = (char *) bfd_alloc (abfd, 3);
9662 attr->s[0] = Tag_CPU_arch;
9667 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9671 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9672 int newtag, int secondary_compat)
9674 #define T(X) TAG_CPU_ARCH_##X
9675 int tagl, tagh, result;
9678 T(V6T2), /* PRE_V4. */
9682 T(V6T2), /* V5TE. */
9683 T(V6T2), /* V5TEJ. */
9690 T(V6K), /* PRE_V4. */
9695 T(V6K), /* V5TEJ. */
9697 T(V6KZ), /* V6KZ. */
9703 T(V7), /* PRE_V4. */
9722 T(V6K), /* V5TEJ. */
9724 T(V6KZ), /* V6KZ. */
9737 T(V6K), /* V5TEJ. */
9739 T(V6KZ), /* V6KZ. */
9743 T(V6S_M), /* V6_M. */
9744 T(V6S_M) /* V6S_M. */
9750 T(V7E_M), /* V4T. */
9751 T(V7E_M), /* V5T. */
9752 T(V7E_M), /* V5TE. */
9753 T(V7E_M), /* V5TEJ. */
9755 T(V7E_M), /* V6KZ. */
9756 T(V7E_M), /* V6T2. */
9757 T(V7E_M), /* V6K. */
9759 T(V7E_M), /* V6_M. */
9760 T(V7E_M), /* V6S_M. */
9761 T(V7E_M) /* V7E_M. */
9763 const int v4t_plus_v6_m[] =
9769 T(V5TE), /* V5TE. */
9770 T(V5TEJ), /* V5TEJ. */
9772 T(V6KZ), /* V6KZ. */
9773 T(V6T2), /* V6T2. */
9776 T(V6_M), /* V6_M. */
9777 T(V6S_M), /* V6S_M. */
9778 T(V7E_M), /* V7E_M. */
9779 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9789 /* Pseudo-architecture. */
9793 /* Check we've not got a higher architecture than we know about. */
9795 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
9797 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9801 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9803 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9804 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9805 oldtag = T(V4T_PLUS_V6_M);
9807 /* And override the new tag if we have a Tag_also_compatible_with on the
9810 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9811 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9812 newtag = T(V4T_PLUS_V6_M);
9814 tagl = (oldtag < newtag) ? oldtag : newtag;
9815 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9817 /* Architectures before V6KZ add features monotonically. */
9818 if (tagh <= TAG_CPU_ARCH_V6KZ)
9821 result = comb[tagh - T(V6T2)][tagl];
9823 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9824 as the canonical version. */
9825 if (result == T(V4T_PLUS_V6_M))
9828 *secondary_compat_out = T(V6_M);
9831 *secondary_compat_out = -1;
9835 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9836 ibfd, oldtag, newtag);
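/* Illustrative sketch only, kept out of the build: for architectures up
   to V6KZ the combination above simply keeps the higher tag, since
   features are added monotonically.  The helper name is hypothetical.  */
#if 0
static void
example_cpu_arch_combine_monotonic (bfd *ibfd)
{
  int secondary_compat_out = -1;
  int merged = tag_cpu_arch_combine (ibfd, TAG_CPU_ARCH_V5TE,
                                     &secondary_compat_out,
                                     TAG_CPU_ARCH_V6, -1);

  /* Every V5TE feature is also present in V6, so MERGED is
     TAG_CPU_ARCH_V6 here.  */
  (void) merged;
}
#endif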
9844 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9845 are conflicting attributes. */
9848 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9850 obj_attribute *in_attr;
9851 obj_attribute *out_attr;
9852 obj_attribute_list *in_list;
9853 obj_attribute_list *out_list;
9854 obj_attribute_list **out_listp;
9855 /* Some tags have 0 = don't care, 1 = strong requirement,
9856 2 = weak requirement. */
9857 static const int order_021[3] = {0, 2, 1};
9859 bfd_boolean result = TRUE;
9861 /* Skip the linker stubs file. This preserves previous behavior
9862 of accepting unknown attributes in the first input file - but
9864 if (ibfd->flags & BFD_LINKER_CREATED)
9867 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9869 /* This is the first object. Copy the attributes. */
9870 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9872 out_attr = elf_known_obj_attributes_proc (obfd);
9874 /* Use the Tag_null value to indicate the attributes have been
9878 /* We do not output objects with Tag_MPextension_use_legacy - we move
9879 the attribute's value to Tag_MPextension_use. */
9880 if (out_attr[Tag_MPextension_use_legacy].i != 0)
9882 if (out_attr[Tag_MPextension_use].i != 0
9883 && out_attr[Tag_MPextension_use_legacy].i
9884 != out_attr[Tag_MPextension_use].i)
9887 (_("Error: %B has both the current and legacy "
9888 "Tag_MPextension_use attributes"), ibfd);
9892 out_attr[Tag_MPextension_use] =
9893 out_attr[Tag_MPextension_use_legacy];
9894 out_attr[Tag_MPextension_use_legacy].type = 0;
9895 out_attr[Tag_MPextension_use_legacy].i = 0;
9901 in_attr = elf_known_obj_attributes_proc (ibfd);
9902 out_attr = elf_known_obj_attributes_proc (obfd);
9903 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9904 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9906 /* Ignore mismatches if the object doesn't use floating point. */
9907 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9908 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9909 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9912 (_("error: %B uses VFP register arguments, %B does not"),
9913 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
9914 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
9919 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9921 /* Merge this attribute with existing attributes. */
9924 case Tag_CPU_raw_name:
9926 /* These are merged after Tag_CPU_arch. */
9929 case Tag_ABI_optimization_goals:
9930 case Tag_ABI_FP_optimization_goals:
9931 /* Use the first value seen. */
9936 int secondary_compat = -1, secondary_compat_out = -1;
9937 unsigned int saved_out_attr = out_attr[i].i;
9938 static const char *name_table[] = {
9939 /* These aren't real CPU names, but we can't guess
9940 that from the architecture version alone. */
9956 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9957 secondary_compat = get_secondary_compatible_arch (ibfd);
9958 secondary_compat_out = get_secondary_compatible_arch (obfd);
9959 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9960 &secondary_compat_out,
9963 set_secondary_compatible_arch (obfd, secondary_compat_out);
9965 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9966 if (out_attr[i].i == saved_out_attr)
9967 ; /* Leave the names alone. */
9968 else if (out_attr[i].i == in_attr[i].i)
9970 /* The output architecture has been changed to match the
9971 input architecture. Use the input names. */
9972 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9973 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9975 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9976 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9981 out_attr[Tag_CPU_name].s = NULL;
9982 out_attr[Tag_CPU_raw_name].s = NULL;
9985 /* If we still don't have a value for Tag_CPU_name,
9986 make one up now. Tag_CPU_raw_name remains blank. */
9987 if (out_attr[Tag_CPU_name].s == NULL
9988 && out_attr[i].i < ARRAY_SIZE (name_table))
9989 out_attr[Tag_CPU_name].s =
9990 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9994 case Tag_ARM_ISA_use:
9995 case Tag_THUMB_ISA_use:
9997 case Tag_Advanced_SIMD_arch:
9998 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9999 case Tag_ABI_FP_rounding:
10000 case Tag_ABI_FP_exceptions:
10001 case Tag_ABI_FP_user_exceptions:
10002 case Tag_ABI_FP_number_model:
10003 case Tag_VFP_HP_extension:
10004 case Tag_CPU_unaligned_access:
10006 case Tag_Virtualization_use:
10007 case Tag_MPextension_use:
10008 /* Use the largest value specified. */
10009 if (in_attr[i].i > out_attr[i].i)
10010 out_attr[i].i = in_attr[i].i;
10013 case Tag_ABI_align8_preserved:
10014 case Tag_ABI_PCS_RO_data:
10015 /* Use the smallest value specified. */
10016 if (in_attr[i].i < out_attr[i].i)
10017 out_attr[i].i = in_attr[i].i;
10020 case Tag_ABI_align8_needed:
10021 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
10022 && (in_attr[Tag_ABI_align8_preserved].i == 0
10023 || out_attr[Tag_ABI_align8_preserved].i == 0))
10025 /* This error message should be enabled once all non-conformant
10026 binaries in the toolchain have had the attributes set
10029 (_("error: %B: 8-byte data alignment conflicts with %B"),
10033 /* Fall through. */
10034 case Tag_ABI_FP_denormal:
10035 case Tag_ABI_PCS_GOT_use:
10036 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
10037 value if greater than 2 (for future-proofing). */
10038 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
10039 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
10040 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
10041 out_attr[i].i = in_attr[i].i;
10045 case Tag_CPU_arch_profile:
10046 if (out_attr[i].i != in_attr[i].i)
10048 /* 0 will merge with anything.
10049 'A' and 'S' merge to 'A'.
10050 'R' and 'S' merge to 'R'.
10051 'M' and 'A|R|S' is an error. */
10052 if (out_attr[i].i == 0
10053 || (out_attr[i].i == 'S'
10054 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
10055 out_attr[i].i = in_attr[i].i;
10056 else if (in_attr[i].i == 0
10057 || (in_attr[i].i == 'S'
10058 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
10059 ; /* Do nothing. */
10063 (_("error: %B: Conflicting architecture profiles %c/%c"),
10065 in_attr[i].i ? in_attr[i].i : '0',
10066 out_attr[i].i ? out_attr[i].i : '0');
10073 static const struct
10077 } vfp_versions[7] =
10091 /* Values greater than 6 aren't defined, so just pick the
10093 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
10095 out_attr[i] = in_attr[i];
10098 /* The output uses the superset of input features
10099 (ISA version) and registers. */
10100 ver = vfp_versions[in_attr[i].i].ver;
10101 if (ver < vfp_versions[out_attr[i].i].ver)
10102 ver = vfp_versions[out_attr[i].i].ver;
10103 regs = vfp_versions[in_attr[i].i].regs;
10104 if (regs < vfp_versions[out_attr[i].i].regs)
10105 regs = vfp_versions[out_attr[i].i].regs;
10106 /* This assumes all possible supersets are also a valid
10108 for (newval = 6; newval > 0; newval--)
10110 if (regs == vfp_versions[newval].regs
10111 && ver == vfp_versions[newval].ver)
10114 out_attr[i].i = newval;
10117 case Tag_PCS_config:
10118 if (out_attr[i].i == 0)
10119 out_attr[i].i = in_attr[i].i;
10120 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
10122 /* It's sometimes ok to mix different configs, so this is only
10125 (_("Warning: %B: Conflicting platform configuration"), ibfd);
10128 case Tag_ABI_PCS_R9_use:
10129 if (in_attr[i].i != out_attr[i].i
10130 && out_attr[i].i != AEABI_R9_unused
10131 && in_attr[i].i != AEABI_R9_unused)
10134 (_("error: %B: Conflicting use of R9"), ibfd);
10137 if (out_attr[i].i == AEABI_R9_unused)
10138 out_attr[i].i = in_attr[i].i;
10140 case Tag_ABI_PCS_RW_data:
10141 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
10142 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
10143 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
10146 (_("error: %B: SB relative addressing conflicts with use of R9"),
10150 /* Use the smallest value specified. */
10151 if (in_attr[i].i < out_attr[i].i)
10152 out_attr[i].i = in_attr[i].i;
10154 case Tag_ABI_PCS_wchar_t:
10155 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10156 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10159 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10160 ibfd, in_attr[i].i, out_attr[i].i);
10162 else if (in_attr[i].i && !out_attr[i].i)
10163 out_attr[i].i = in_attr[i].i;
10165 case Tag_ABI_enum_size:
10166 if (in_attr[i].i != AEABI_enum_unused)
10168 if (out_attr[i].i == AEABI_enum_unused
10169 || out_attr[i].i == AEABI_enum_forced_wide)
10171 /* The existing object is compatible with anything.
10172 Use whatever requirements the new object has. */
10173 out_attr[i].i = in_attr[i].i;
10175 else if (in_attr[i].i != AEABI_enum_forced_wide
10176 && out_attr[i].i != in_attr[i].i
10177 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10179 static const char *aeabi_enum_names[] =
10180 { "", "variable-size", "32-bit", "" };
10181 const char *in_name =
10182 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10183 ? aeabi_enum_names[in_attr[i].i]
10185 const char *out_name =
10186 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10187 ? aeabi_enum_names[out_attr[i].i]
10190 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10191 ibfd, in_name, out_name);
10195 case Tag_ABI_VFP_args:
10198 case Tag_ABI_WMMX_args:
10199 if (in_attr[i].i != out_attr[i].i)
10202 (_("error: %B uses iWMMXt register arguments, %B does not"),
10207 case Tag_compatibility:
10208 /* Merged in target-independent code. */
10210 case Tag_ABI_HardFP_use:
10211 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
10212 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
10213 || (in_attr[i].i == 2 && out_attr[i].i == 1))
10215 else if (in_attr[i].i > out_attr[i].i)
10216 out_attr[i].i = in_attr[i].i;
10218 case Tag_ABI_FP_16bit_format:
10219 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10221 if (in_attr[i].i != out_attr[i].i)
10224 (_("error: fp16 format mismatch between %B and %B"),
10229 if (in_attr[i].i != 0)
10230 out_attr[i].i = in_attr[i].i;
10234 /* This tag is set to zero if we can use UDIV and SDIV in Thumb
10235 mode on a v7-M or v7-R CPU; to one if we can not use UDIV or
10236 SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
10237 CPU. We will merge as follows: If the input attribute's value
10238 is one then the output attribute's value remains unchanged. If
10239 the input attribute's value is zero or two then if the output
10240 attribute's value is one the output value is set to the input
10241 value, otherwise the output value must be the same as the
10243 if (in_attr[i].i != 1 && out_attr[i].i != 1)
10245 if (in_attr[i].i != out_attr[i].i)
10248 (_("DIV usage mismatch between %B and %B"),
10254 if (in_attr[i].i != 1)
10255 out_attr[i].i = in_attr[i].i;
10259 case Tag_MPextension_use_legacy:
10260 /* We don't output objects with Tag_MPextension_use_legacy - we
10261 move the value to Tag_MPextension_use. */
10262 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
10264 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
10267 (_("%B has has both the current and legacy "
10268 "Tag_MPextension_use attributes"),
10274 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
10275 out_attr[Tag_MPextension_use] = in_attr[i];
10279 case Tag_nodefaults:
10280 /* This tag is set if it exists, but the value is unused (and is
10281 typically zero). We don't actually need to do anything here -
10282 the merge happens automatically when the type flags are merged
10285 case Tag_also_compatible_with:
10286 /* Already done in Tag_CPU_arch. */
10288 case Tag_conformance:
10289 /* Keep the attribute if it matches. Throw it away otherwise.
10290 No attribute means no claim to conform. */
10291 if (!in_attr[i].s || !out_attr[i].s
10292 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10293 out_attr[i].s = NULL;
10298 bfd *err_bfd = NULL;
10300 /* The "known_obj_attributes" table does contain some undefined
10301 attributes. Ensure that they are unused. */
10302 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
10304 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
10307 if (err_bfd != NULL)
10309 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10310 if ((i & 127) < 64)
10313 (_("%B: Unknown mandatory EABI object attribute %d"),
10315 bfd_set_error (bfd_error_bad_value);
10321 (_("Warning: %B: Unknown EABI object attribute %d"),
10326 /* Only pass on attributes that match in both inputs. */
10327 if (in_attr[i].i != out_attr[i].i
10328 || in_attr[i].s != out_attr[i].s
10329 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10330 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10333 out_attr[i].s = NULL;
10338 /* If out_attr was copied from in_attr then it won't have a type yet. */
10339 if (in_attr[i].type && !out_attr[i].type)
10340 out_attr[i].type = in_attr[i].type;
10343 /* Merge Tag_compatibility attributes and any common GNU ones. */
10344 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
10347 /* Check for any attributes not known on ARM. */
10348 in_list = elf_other_obj_attributes_proc (ibfd);
10349 out_listp = &elf_other_obj_attributes_proc (obfd);
10350 out_list = *out_listp;
10352 for (; in_list || out_list; )
10354 bfd *err_bfd = NULL;
10357 /* The tags for each list are in numerical order. */
10358 /* If the tags are equal, then merge. */
10359 if (out_list && (!in_list || in_list->tag > out_list->tag))
10361 /* This attribute only exists in obfd. We can't merge, and we don't
10362 know what the tag means, so delete it. */
10364 err_tag = out_list->tag;
10365 *out_listp = out_list->next;
10366 out_list = *out_listp;
10368 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10370 /* This attribute only exists in ibfd. We can't merge, and we don't
10371 know what the tag means, so ignore it. */
10373 err_tag = in_list->tag;
10374 in_list = in_list->next;
10376 else /* The tags are equal. */
10378 /* At present, all attributes in the list are unknown, and
10379 therefore can't be merged meaningfully. */
10381 err_tag = out_list->tag;
10383 /* Only pass on attributes that match in both inputs. */
10384 if (in_list->attr.i != out_list->attr.i
10385 || in_list->attr.s != out_list->attr.s
10386 || (in_list->attr.s && out_list->attr.s
10387 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10389 /* No match. Delete the attribute. */
10390 *out_listp = out_list->next;
10391 out_list = *out_listp;
10395 /* Matched. Keep the attribute and move to the next. */
10396 out_list = out_list->next;
10397 in_list = in_list->next;
10403 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10404 if ((err_tag & 127) < 64)
10407 (_("%B: Unknown mandatory EABI object attribute %d"),
10409 bfd_set_error (bfd_error_bad_value);
10415 (_("Warning: %B: Unknown EABI object attribute %d"),
10424 /* Return TRUE if the two EABI versions are compatible. */
10427 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10429 /* v4 and v5 are the same spec before and after it was released,
10430 so allow mixing them. */
10431 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10432 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10435 return (iver == over);
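/* Illustrative sketch only, kept out of the build: the one special case
   accepted above is mixing EABI v4 and v5 objects; every other pairing
   must match exactly.  The helper name is hypothetical.  */
#if 0
static void
example_eabi_version_mixing (void)
{
  /* v4 and v5 may be mixed in either order ...  */
  BFD_ASSERT (elf32_arm_versions_compatible (EF_ARM_EABI_VER4,
                                             EF_ARM_EABI_VER5));
  /* ... but, for example, v2 only matches itself.  */
  BFD_ASSERT (! elf32_arm_versions_compatible (EF_ARM_EABI_VER2,
                                               EF_ARM_EABI_VER5));
}
#endif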
10438 /* Merge backend specific data from an object file to the output
10439 object file when linking. */
10442 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10444 /* Display the flags field. */
10447 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10449 FILE * file = (FILE *) ptr;
10450 unsigned long flags;
10452 BFD_ASSERT (abfd != NULL && ptr != NULL);
10454 /* Print normal ELF private data. */
10455 _bfd_elf_print_private_bfd_data (abfd, ptr);
10457 flags = elf_elfheader (abfd)->e_flags;
10458 /* Ignore init flag - it may not be set, despite the flags field
10459 containing valid data. */
10461 /* xgettext:c-format */
10462 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10464 switch (EF_ARM_EABI_VERSION (flags))
10466 case EF_ARM_EABI_UNKNOWN:
10467 /* The following flag bits are GNU extensions and not part of the
10468 official ARM ELF extended ABI. Hence they are only decoded if
10469 the EABI version is not set. */
10470 if (flags & EF_ARM_INTERWORK)
10471 fprintf (file, _(" [interworking enabled]"));
10473 if (flags & EF_ARM_APCS_26)
10474 fprintf (file, " [APCS-26]");
10476 fprintf (file, " [APCS-32]");
10478 if (flags & EF_ARM_VFP_FLOAT)
10479 fprintf (file, _(" [VFP float format]"));
10480 else if (flags & EF_ARM_MAVERICK_FLOAT)
10481 fprintf (file, _(" [Maverick float format]"));
10483 fprintf (file, _(" [FPA float format]"));
10485 if (flags & EF_ARM_APCS_FLOAT)
10486 fprintf (file, _(" [floats passed in float registers]"));
10488 if (flags & EF_ARM_PIC)
10489 fprintf (file, _(" [position independent]"));
10491 if (flags & EF_ARM_NEW_ABI)
10492 fprintf (file, _(" [new ABI]"));
10494 if (flags & EF_ARM_OLD_ABI)
10495 fprintf (file, _(" [old ABI]"));
10497 if (flags & EF_ARM_SOFT_FLOAT)
10498 fprintf (file, _(" [software FP]"));
10500 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10501 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10502 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10503 | EF_ARM_MAVERICK_FLOAT);
10506 case EF_ARM_EABI_VER1:
10507 fprintf (file, _(" [Version1 EABI]"));
10509 if (flags & EF_ARM_SYMSARESORTED)
10510 fprintf (file, _(" [sorted symbol table]"));
10512 fprintf (file, _(" [unsorted symbol table]"));
10514 flags &= ~ EF_ARM_SYMSARESORTED;
10517 case EF_ARM_EABI_VER2:
10518 fprintf (file, _(" [Version2 EABI]"));
10520 if (flags & EF_ARM_SYMSARESORTED)
10521 fprintf (file, _(" [sorted symbol table]"));
10523 fprintf (file, _(" [unsorted symbol table]"));
10525 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10526 fprintf (file, _(" [dynamic symbols use segment index]"));
10528 if (flags & EF_ARM_MAPSYMSFIRST)
10529 fprintf (file, _(" [mapping symbols precede others]"));
10531 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10532 | EF_ARM_MAPSYMSFIRST);
10535 case EF_ARM_EABI_VER3:
10536 fprintf (file, _(" [Version3 EABI]"));
10539 case EF_ARM_EABI_VER4:
10540 fprintf (file, _(" [Version4 EABI]"));
10543 case EF_ARM_EABI_VER5:
10544 fprintf (file, _(" [Version5 EABI]"));
10546 if (flags & EF_ARM_BE8)
10547 fprintf (file, _(" [BE8]"));
10549 if (flags & EF_ARM_LE8)
10550 fprintf (file, _(" [LE8]"));
10552 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10556 fprintf (file, _(" <EABI version unrecognised>"));
10560 flags &= ~ EF_ARM_EABIMASK;
10562 if (flags & EF_ARM_RELEXEC)
10563 fprintf (file, _(" [relocatable executable]"));
10565 if (flags & EF_ARM_HASENTRY)
10566 fprintf (file, _(" [has entry point]"));
10568 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10571 fprintf (file, _("<Unrecognised flag bits set>"));
10573 fputc ('\n', file);
10579 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10581 switch (ELF_ST_TYPE (elf_sym->st_info))
10583 case STT_ARM_TFUNC:
10584 return ELF_ST_TYPE (elf_sym->st_info);
10586 case STT_ARM_16BIT:
10587 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10588 This allows us to distinguish between data used by Thumb instructions
10589 and non-data (which is probably code) inside Thumb regions of an
10591 if (type != STT_OBJECT && type != STT_TLS)
10592 return ELF_ST_TYPE (elf_sym->st_info);
10603 elf32_arm_gc_mark_hook (asection *sec,
10604 struct bfd_link_info *info,
10605 Elf_Internal_Rela *rel,
10606 struct elf_link_hash_entry *h,
10607 Elf_Internal_Sym *sym)
10610 switch (ELF32_R_TYPE (rel->r_info))
10612 case R_ARM_GNU_VTINHERIT:
10613 case R_ARM_GNU_VTENTRY:
10617 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10620 /* Update the got entry reference counts for the section being removed. */
10623 elf32_arm_gc_sweep_hook (bfd * abfd,
10624 struct bfd_link_info * info,
10626 const Elf_Internal_Rela * relocs)
10628 Elf_Internal_Shdr *symtab_hdr;
10629 struct elf_link_hash_entry **sym_hashes;
10630 bfd_signed_vma *local_got_refcounts;
10631 const Elf_Internal_Rela *rel, *relend;
10632 struct elf32_arm_link_hash_table * globals;
10634 if (info->relocatable)
10637 globals = elf32_arm_hash_table (info);
10638 if (globals == NULL)
10641 elf_section_data (sec)->local_dynrel = NULL;
10643 symtab_hdr = & elf_symtab_hdr (abfd);
10644 sym_hashes = elf_sym_hashes (abfd);
10645 local_got_refcounts = elf_local_got_refcounts (abfd);
10647 check_use_blx (globals);
10649 relend = relocs + sec->reloc_count;
10650 for (rel = relocs; rel < relend; rel++)
10652 unsigned long r_symndx;
10653 struct elf_link_hash_entry *h = NULL;
10656 r_symndx = ELF32_R_SYM (rel->r_info);
10657 if (r_symndx >= symtab_hdr->sh_info)
10659 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10660 while (h->root.type == bfd_link_hash_indirect
10661 || h->root.type == bfd_link_hash_warning)
10662 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10665 r_type = ELF32_R_TYPE (rel->r_info);
10666 r_type = arm_real_reloc_type (globals, r_type);
10670 case R_ARM_GOT_PREL:
10671 case R_ARM_TLS_GD32:
10672 case R_ARM_TLS_IE32:
10675 if (h->got.refcount > 0)
10676 h->got.refcount -= 1;
10678 else if (local_got_refcounts != NULL)
10680 if (local_got_refcounts[r_symndx] > 0)
10681 local_got_refcounts[r_symndx] -= 1;
10685 case R_ARM_TLS_LDM32:
10686 globals->tls_ldm_got.refcount -= 1;
10690 case R_ARM_ABS32_NOI:
10692 case R_ARM_REL32_NOI:
10698 case R_ARM_THM_CALL:
10699 case R_ARM_THM_JUMP24:
10700 case R_ARM_THM_JUMP19:
10701 case R_ARM_MOVW_ABS_NC:
10702 case R_ARM_MOVT_ABS:
10703 case R_ARM_MOVW_PREL_NC:
10704 case R_ARM_MOVT_PREL:
10705 case R_ARM_THM_MOVW_ABS_NC:
10706 case R_ARM_THM_MOVT_ABS:
10707 case R_ARM_THM_MOVW_PREL_NC:
10708 case R_ARM_THM_MOVT_PREL:
10709 /* Should the interworking branches be here also? */
10713 struct elf32_arm_link_hash_entry *eh;
10714 struct elf32_arm_relocs_copied **pp;
10715 struct elf32_arm_relocs_copied *p;
10717 eh = (struct elf32_arm_link_hash_entry *) h;
10719 if (h->plt.refcount > 0)
10721 h->plt.refcount -= 1;
10722 if (r_type == R_ARM_THM_CALL)
10723 eh->plt_maybe_thumb_refcount--;
10725 if (r_type == R_ARM_THM_JUMP24
10726 || r_type == R_ARM_THM_JUMP19)
10727 eh->plt_thumb_refcount--;
10730 if (r_type == R_ARM_ABS32
10731 || r_type == R_ARM_REL32
10732 || r_type == R_ARM_ABS32_NOI
10733 || r_type == R_ARM_REL32_NOI)
10735 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10737 if (p->section == sec)
10740 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10741 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10759 /* Look through the relocs for a section during the first phase. */
10762 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10763 asection *sec, const Elf_Internal_Rela *relocs)
10765 Elf_Internal_Shdr *symtab_hdr;
10766 struct elf_link_hash_entry **sym_hashes;
10767 const Elf_Internal_Rela *rel;
10768 const Elf_Internal_Rela *rel_end;
10771 bfd_vma *local_got_offsets;
10772 struct elf32_arm_link_hash_table *htab;
10773 bfd_boolean needs_plt;
10774 unsigned long nsyms;
10776 if (info->relocatable)
10779 BFD_ASSERT (is_arm_elf (abfd));
10781 htab = elf32_arm_hash_table (info);
10787 /* Create dynamic sections for relocatable executables so that we can
10788 copy relocations. */
10789 if (htab->root.is_relocatable_executable
10790 && ! htab->root.dynamic_sections_created)
10792 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10796 dynobj = elf_hash_table (info)->dynobj;
10797 local_got_offsets = elf_local_got_offsets (abfd);
10799 symtab_hdr = & elf_symtab_hdr (abfd);
10800 sym_hashes = elf_sym_hashes (abfd);
10801 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10803 rel_end = relocs + sec->reloc_count;
10804 for (rel = relocs; rel < rel_end; rel++)
10806 struct elf_link_hash_entry *h;
10807 struct elf32_arm_link_hash_entry *eh;
10808 unsigned long r_symndx;
10811 r_symndx = ELF32_R_SYM (rel->r_info);
10812 r_type = ELF32_R_TYPE (rel->r_info);
10813 r_type = arm_real_reloc_type (htab, r_type);
10815 if (r_symndx >= nsyms
10816 /* PR 9934: It is possible to have relocations that do not
10817 refer to symbols, thus it is also possible to have an
10818 object file containing relocations but no symbol table. */
10819 && (r_symndx > 0 || nsyms > 0))
10821 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10826 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10830 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10831 while (h->root.type == bfd_link_hash_indirect
10832 || h->root.type == bfd_link_hash_warning)
10833 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10836 eh = (struct elf32_arm_link_hash_entry *) h;
10841 case R_ARM_GOT_PREL:
10842 case R_ARM_TLS_GD32:
10843 case R_ARM_TLS_IE32:
10844 /* This symbol requires a global offset table entry. */
10846 int tls_type, old_tls_type;
10850 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10851 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10852 default: tls_type = GOT_NORMAL; break;
10858 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10862 bfd_signed_vma *local_got_refcounts;
10864 /* This is a global offset table entry for a local symbol. */
10865 local_got_refcounts = elf_local_got_refcounts (abfd);
10866 if (local_got_refcounts == NULL)
10868 bfd_size_type size;
10870 size = symtab_hdr->sh_info;
10871 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10872 local_got_refcounts = (bfd_signed_vma *)
10873 bfd_zalloc (abfd, size);
10874 if (local_got_refcounts == NULL)
10876 elf_local_got_refcounts (abfd) = local_got_refcounts;
10877 elf32_arm_local_got_tls_type (abfd)
10878 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10880 local_got_refcounts[r_symndx] += 1;
10881 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10884 /* We will already have issued an error message if there is a
10885 TLS / non-TLS mismatch, based on the symbol type. We don't
10886 support any linker relaxations. So just combine any TLS
10888 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10889 && tls_type != GOT_NORMAL)
10890 tls_type |= old_tls_type;
10892 if (old_tls_type != tls_type)
10895 elf32_arm_hash_entry (h)->tls_type = tls_type;
10897 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10900 /* Fall through. */
10902 case R_ARM_TLS_LDM32:
10903 if (r_type == R_ARM_TLS_LDM32)
10904 htab->tls_ldm_got.refcount++;
10905 /* Fall through. */
10907 case R_ARM_GOTOFF32:
10909 if (htab->sgot == NULL)
10911 if (htab->root.dynobj == NULL)
10912 htab->root.dynobj = abfd;
10913 if (!create_got_section (htab->root.dynobj, info))
10919 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10920 ldr __GOTT_INDEX__ offsets. */
10921 if (!htab->vxworks_p)
10923 /* Fall through. */
10930 case R_ARM_THM_CALL:
10931 case R_ARM_THM_JUMP24:
10932 case R_ARM_THM_JUMP19:
10936 case R_ARM_MOVW_ABS_NC:
10937 case R_ARM_MOVT_ABS:
10938 case R_ARM_THM_MOVW_ABS_NC:
10939 case R_ARM_THM_MOVT_ABS:
10942 (*_bfd_error_handler)
10943 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10944 abfd, elf32_arm_howto_table_1[r_type].name,
10945 (h) ? h->root.root.string : "a local symbol");
10946 bfd_set_error (bfd_error_bad_value);
10950 /* Fall through. */
10952 case R_ARM_ABS32_NOI:
10954 case R_ARM_REL32_NOI:
10955 case R_ARM_MOVW_PREL_NC:
10956 case R_ARM_MOVT_PREL:
10957 case R_ARM_THM_MOVW_PREL_NC:
10958 case R_ARM_THM_MOVT_PREL:
10962 /* Should the interworking branches be listed here? */
10965 /* If this reloc is in a read-only section, we might
10966 need a copy reloc. We can't check reliably at this
10967 stage whether the section is read-only, as input
10968 sections have not yet been mapped to output sections.
10969 Tentatively set the flag for now, and correct in
10970 adjust_dynamic_symbol. */
10972 h->non_got_ref = 1;
10974 /* We may need a .plt entry if the function this reloc
10975 refers to is in a different object. We can't tell for
10976 sure yet, because something later might force the
10981 /* If we create a PLT entry, this relocation will reference
10982 it, even if it's an ABS32 relocation. */
10983 h->plt.refcount += 1;
10985 /* It's too early to use htab->use_blx here, so we have to
10986 record possible blx references separately from
10987 relocs that definitely need a thumb stub. */
10989 if (r_type == R_ARM_THM_CALL)
10990 eh->plt_maybe_thumb_refcount += 1;
10992 if (r_type == R_ARM_THM_JUMP24
10993 || r_type == R_ARM_THM_JUMP19)
10994 eh->plt_thumb_refcount += 1;
10997 /* If we are creating a shared library or relocatable executable,
10998 and this is a reloc against a global symbol, or a non PC
10999 relative reloc against a local symbol, then we need to copy
11000 the reloc into the shared library. However, if we are linking
11001 with -Bsymbolic, we do not need to copy a reloc against a
11002 global symbol which is defined in an object we are
11003 including in the link (i.e., DEF_REGULAR is set). At
11004 this point we have not seen all the input files, so it is
11005 possible that DEF_REGULAR is not set now but will be set
11006 later (it is never cleared). We account for that
11007 possibility below by storing information in the
11008 relocs_copied field of the hash table entry. */
11009 if ((info->shared || htab->root.is_relocatable_executable)
11010 && (sec->flags & SEC_ALLOC) != 0
11011 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
11012 || (h != NULL && ! h->needs_plt
11013 && (! info->symbolic || ! h->def_regular))))
11015 struct elf32_arm_relocs_copied *p, **head;
11017 /* When creating a shared object, we must copy these
11018 reloc types into the output file. We create a reloc
11019 section in dynobj and make room for this reloc. */
11020 if (sreloc == NULL)
11022 sreloc = _bfd_elf_make_dynamic_reloc_section
11023 (sec, dynobj, 2, abfd, ! htab->use_rel);
11025 if (sreloc == NULL)
11028 /* BPABI objects never have dynamic relocations mapped. */
11029 if (htab->symbian_p)
11033 flags = bfd_get_section_flags (dynobj, sreloc);
11034 flags &= ~(SEC_LOAD | SEC_ALLOC);
11035 bfd_set_section_flags (dynobj, sreloc, flags);
11039 /* If this is a global symbol, we count the number of
11040 relocations we need for this symbol. */
11043 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
11047 /* Track dynamic relocs needed for local syms too.
11048 We really need local syms available to do this
11049 easily. Oh well. */
11052 Elf_Internal_Sym *isym;
11054 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
11059 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
11063 vpp = &elf_section_data (s)->local_dynrel;
11064 head = (struct elf32_arm_relocs_copied **) vpp;
11068 if (p == NULL || p->section != sec)
11070 bfd_size_type amt = sizeof *p;
11072 p = (struct elf32_arm_relocs_copied *)
11073 bfd_alloc (htab->root.dynobj, amt);
11083 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
11089 /* This relocation describes the C++ object vtable hierarchy.
11090 Reconstruct it for later use during GC. */
11091 case R_ARM_GNU_VTINHERIT:
11092 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
11096 /* This relocation describes which C++ vtable entries are actually
11097 used. Record for later use during GC. */
11098 case R_ARM_GNU_VTENTRY:
11099 BFD_ASSERT (h != NULL);
11101 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
11110 /* Unwinding tables are not referenced directly. This pass marks them as
11111 required if the corresponding code section is marked. */
11114 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
11115 elf_gc_mark_hook_fn gc_mark_hook)
11118 Elf_Internal_Shdr **elf_shdrp;
11121 /* Marking EH data may cause additional code sections to be marked,
11122 requiring multiple passes. */
11127 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11131 if (! is_arm_elf (sub))
11134 elf_shdrp = elf_elfsections (sub);
11135 for (o = sub->sections; o != NULL; o = o->next)
11137 Elf_Internal_Shdr *hdr;
11139 hdr = &elf_section_data (o)->this_hdr;
11140 if (hdr->sh_type == SHT_ARM_EXIDX
11142 && hdr->sh_link < elf_numsections (sub)
11144 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11147 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11157 /* Treat mapping symbols as special target symbols. */
11160 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11162 return bfd_is_arm_special_symbol_name (sym->name,
11163 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
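/* Illustrative sketch only, kept out of the build: the mapping symbols
   treated as special above are the EABI-defined $a, $t and $d names
   (ARM code, Thumb code and data respectively), optionally followed by
   a dotted suffix such as "$d.realdata".  The helper name is
   hypothetical.  */
#if 0
static void
example_mapping_symbol_names (void)
{
  BFD_ASSERT (bfd_is_arm_special_symbol_name
              ("$a", BFD_ARM_SPECIAL_SYM_TYPE_MAP));
  BFD_ASSERT (bfd_is_arm_special_symbol_name
              ("$d.realdata", BFD_ARM_SPECIAL_SYM_TYPE_MAP));
  BFD_ASSERT (! bfd_is_arm_special_symbol_name
              ("main", BFD_ARM_SPECIAL_SYM_TYPE_ANY));
}
#endif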
11166 /* This is a copy of elf_find_function() from elf.c except that
11167 ARM mapping symbols are ignored when looking for function names
11168 and STT_ARM_TFUNC is considered to be a function type. */
11171 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11172 asection * section,
11173 asymbol ** symbols,
11175 const char ** filename_ptr,
11176 const char ** functionname_ptr)
11178 const char * filename = NULL;
11179 asymbol * func = NULL;
11180 bfd_vma low_func = 0;
11183 for (p = symbols; *p != NULL; p++)
11185 elf_symbol_type *q;
11187 q = (elf_symbol_type *) *p;
11189 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11194 filename = bfd_asymbol_name (&q->symbol);
11197 case STT_ARM_TFUNC:
11199 /* Skip mapping symbols. */
11200 if ((q->symbol.flags & BSF_LOCAL)
11201 && bfd_is_arm_special_symbol_name (q->symbol.name,
11202 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11204 /* Fall through. */
11205 if (bfd_get_section (&q->symbol) == section
11206 && q->symbol.value >= low_func
11207 && q->symbol.value <= offset)
11209 func = (asymbol *) q;
11210 low_func = q->symbol.value;
11220 *filename_ptr = filename;
11221 if (functionname_ptr)
11222 *functionname_ptr = bfd_asymbol_name (func);
11228 /* Find the nearest line to a particular section and offset, for error
11229 reporting. This code is a duplicate of the code in elf.c, except
11230 that it uses arm_elf_find_function. */
11233 elf32_arm_find_nearest_line (bfd * abfd,
11234 asection * section,
11235 asymbol ** symbols,
11237 const char ** filename_ptr,
11238 const char ** functionname_ptr,
11239 unsigned int * line_ptr)
11241 bfd_boolean found = FALSE;
11243 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11245 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11246 filename_ptr, functionname_ptr,
11248 & elf_tdata (abfd)->dwarf2_find_line_info))
11250 if (!*functionname_ptr)
11251 arm_elf_find_function (abfd, section, symbols, offset,
11252 *filename_ptr ? NULL : filename_ptr,
11258 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11259 & found, filename_ptr,
11260 functionname_ptr, line_ptr,
11261 & elf_tdata (abfd)->line_info))
11264 if (found && (*functionname_ptr || *line_ptr))
11267 if (symbols == NULL)
11270 if (! arm_elf_find_function (abfd, section, symbols, offset,
11271 filename_ptr, functionname_ptr))
11279 elf32_arm_find_inliner_info (bfd * abfd,
11280 const char ** filename_ptr,
11281 const char ** functionname_ptr,
11282 unsigned int * line_ptr)
11285 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11286 functionname_ptr, line_ptr,
11287 & elf_tdata (abfd)->dwarf2_find_line_info);
11291 /* Adjust a symbol defined by a dynamic object and referenced by a
11292 regular object. The current definition is in some section of the
11293 dynamic object, but we're not including those sections. We have to
11294 change the definition to something the rest of the link can
11298 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11299 struct elf_link_hash_entry * h)
11303 struct elf32_arm_link_hash_entry * eh;
11304 struct elf32_arm_link_hash_table *globals;
11306 globals = elf32_arm_hash_table (info);
11307 if (globals == NULL)
11310 dynobj = elf_hash_table (info)->dynobj;
11312 /* Make sure we know what is going on here. */
11313 BFD_ASSERT (dynobj != NULL
11315 || h->u.weakdef != NULL
11318 && !h->def_regular)));
11320 eh = (struct elf32_arm_link_hash_entry *) h;
11322 /* If this is a function, put it in the procedure linkage table. We
11323 will fill in the contents of the procedure linkage table later,
11324 when we know the address of the .got section. */
11325 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11328 if (h->plt.refcount <= 0
11329 || SYMBOL_CALLS_LOCAL (info, h)
11330 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11331 && h->root.type == bfd_link_hash_undefweak))
11333 /* This case can occur if we saw a PLT32 reloc in an input
11334 file, but the symbol was never referred to by a dynamic
11335 object, or if all references were garbage collected. In
11336 such a case, we don't actually need to build a procedure
11337 linkage table, and we can just do a PC24 reloc instead. */
11338 h->plt.offset = (bfd_vma) -1;
11339 eh->plt_thumb_refcount = 0;
11340 eh->plt_maybe_thumb_refcount = 0;
11348 /* It's possible that we incorrectly decided a .plt reloc was
11349 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11350 in check_relocs. We can't decide accurately between function
11351 and non-function syms in check_relocs; objects loaded later in
11352 the link may change h->type. So fix it now. */
11353 h->plt.offset = (bfd_vma) -1;
11354 eh->plt_thumb_refcount = 0;
11355 eh->plt_maybe_thumb_refcount = 0;
11358 /* If this is a weak symbol, and there is a real definition, the
11359 processor independent code will have arranged for us to see the
11360 real definition first, and we can just use the same value. */
11361 if (h->u.weakdef != NULL)
11363 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11364 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11365 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11366 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11370 /* If there are no non-GOT references, we do not need a copy
11372 if (!h->non_got_ref)
11375 /* This is a reference to a symbol defined by a dynamic object which
11376 is not a function. */
11378 /* If we are creating a shared library, we must presume that the
11379 only references to the symbol are via the global offset table.
11380 For such cases we need not do anything here; the relocations will
11381 be handled correctly by relocate_section. Relocatable executables
11382 can reference data in shared objects directly, so we don't need to
11383 do anything here. */
11384 if (info->shared || globals->root.is_relocatable_executable)
11389 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11390 h->root.root.string);
11394 /* We must allocate the symbol in our .dynbss section, which will
11395 become part of the .bss section of the executable. There will be
11396 an entry for this symbol in the .dynsym section. The dynamic
11397 object will contain position independent code, so all references
11398 from the dynamic object to this symbol will go through the global
11399 offset table. The dynamic linker will use the .dynsym entry to
11400 determine the address it must put in the global offset table, so
11401 both the dynamic object and the regular object will refer to the
11402 same memory location for the variable. */
11403 s = bfd_get_section_by_name (dynobj, ".dynbss");
11404 BFD_ASSERT (s != NULL);
11406 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11407 copy the initial value out of the dynamic object and into the
11408 runtime process image. We need to remember the offset into the
11409 .rel(a).bss section we are going to use. */
11410 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11414 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11415 BFD_ASSERT (srel != NULL);
11416 srel->size += RELOC_SIZE (globals);
11420 return _bfd_elf_adjust_dynamic_copy (h, s);
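/* Illustrative sketch only, not used by the linker: a simplified model of
   the .dynbss placement performed by _bfd_elf_adjust_dynamic_copy above.
   The helper name and plain integer types are ours; the real routine works
   on the BFD section and hash entry and derives the alignment itself.  */

static unsigned long
example_dynbss_place (unsigned long *dynbss_size,
		      unsigned long symbol_size,
		      unsigned long alignment)
{
  /* Round the current section size up to the required alignment (assumed
     to be a power of two), give that offset to the copied symbol, then
     grow the section past it.  */
  unsigned long offset = (*dynbss_size + alignment - 1) & ~(alignment - 1);

  *dynbss_size = offset + symbol_size;
  return offset;
}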
11423 /* Allocate space in .plt, .got and associated reloc sections for
11427 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11429 struct bfd_link_info *info;
11430 struct elf32_arm_link_hash_table *htab;
11431 struct elf32_arm_link_hash_entry *eh;
11432 struct elf32_arm_relocs_copied *p;
11433 bfd_signed_vma thumb_refs;
11435 eh = (struct elf32_arm_link_hash_entry *) h;
11437 if (h->root.type == bfd_link_hash_indirect)
11440 if (h->root.type == bfd_link_hash_warning)
11441 /* When warning symbols are created, they **replace** the "real"
11442 entry in the hash table, thus we never get to see the real
11443 symbol in a hash traversal. So look at it now. */
11444 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11446 info = (struct bfd_link_info *) inf;
11447 htab = elf32_arm_hash_table (info);
11451 if (htab->root.dynamic_sections_created
11452 && h->plt.refcount > 0)
11454 /* Make sure this symbol is output as a dynamic symbol.
11455 Undefined weak syms won't yet be marked as dynamic. */
11456 if (h->dynindx == -1
11457 && !h->forced_local)
11459 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11464 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11466 asection *s = htab->splt;
11468 /* If this is the first .plt entry, make room for the special
11471 s->size += htab->plt_header_size;
11473 h->plt.offset = s->size;
11475 /* If we will insert a Thumb trampoline before this PLT, leave room
11477 thumb_refs = eh->plt_thumb_refcount;
11478 if (!htab->use_blx)
11479 thumb_refs += eh->plt_maybe_thumb_refcount;
11481 if (thumb_refs > 0)
11483 h->plt.offset += PLT_THUMB_STUB_SIZE;
11484 s->size += PLT_THUMB_STUB_SIZE;
11487 /* If this symbol is not defined in a regular file, and we are
11488 not generating a shared library, then set the symbol to this
11489 location in the .plt. This is required to make function
11490 pointers compare as equal between the normal executable and
11491 the shared library. */
11493 && !h->def_regular)
11495 h->root.u.def.section = s;
11496 h->root.u.def.value = h->plt.offset;
11498 /* Make sure the function is not marked as Thumb, in case
11499 it is the target of an ABS32 relocation, which will
11500 point to the PLT entry. */
11501 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11502 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11505 /* Make room for this entry. */
11506 s->size += htab->plt_entry_size;
11508 if (!htab->symbian_p)
11510 /* We also need to make an entry in the .got.plt section, which
11511 will be placed in the .got section by the linker script. */
11512 eh->plt_got_offset = htab->sgotplt->size;
11513 htab->sgotplt->size += 4;
11516 /* We also need to make an entry in the .rel(a).plt section. */
11517 htab->srelplt->size += RELOC_SIZE (htab);
11519 /* VxWorks executables have a second set of relocations for
11520 each PLT entry. They go in a separate relocation section,
11521 which is processed by the kernel loader. */
11522 if (htab->vxworks_p && !info->shared)
11524 /* There is a relocation for the initial PLT entry:
11525 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11526 if (h->plt.offset == htab->plt_header_size)
11527 htab->srelplt2->size += RELOC_SIZE (htab);
11529 /* There are two extra relocations for each subsequent
11530 PLT entry: an R_ARM_32 relocation for the GOT entry,
11531 and an R_ARM_32 relocation for the PLT entry. */
11532 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11537 h->plt.offset = (bfd_vma) -1;
11543 h->plt.offset = (bfd_vma) -1;
11547 if (h->got.refcount > 0)
11551 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11554 /* Make sure this symbol is output as a dynamic symbol.
11555 Undefined weak syms won't yet be marked as dynamic. */
11556 if (h->dynindx == -1
11557 && !h->forced_local)
11559 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11563 if (!htab->symbian_p)
11566 h->got.offset = s->size;
11568 if (tls_type == GOT_UNKNOWN)
11571 if (tls_type == GOT_NORMAL)
11572 /* Non-TLS symbols need one GOT slot. */
11576 if (tls_type & GOT_TLS_GD)
11577 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11579 if (tls_type & GOT_TLS_IE)
11580 /* R_ARM_TLS_IE32 needs one GOT slot. */
11584 dyn = htab->root.dynamic_sections_created;
11587 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11589 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11592 if (tls_type != GOT_NORMAL
11593 && (info->shared || indx != 0)
11594 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11595 || h->root.type != bfd_link_hash_undefweak))
11597 if (tls_type & GOT_TLS_IE)
11598 htab->srelgot->size += RELOC_SIZE (htab);
11600 if (tls_type & GOT_TLS_GD)
11601 htab->srelgot->size += RELOC_SIZE (htab);
11603 if ((tls_type & GOT_TLS_GD) && indx != 0)
11604 htab->srelgot->size += RELOC_SIZE (htab);
11606 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11607 || h->root.type != bfd_link_hash_undefweak)
11609 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11610 htab->srelgot->size += RELOC_SIZE (htab);
11614 h->got.offset = (bfd_vma) -1;
11616 /* Allocate stubs for exported Thumb functions on v4t. */
11617 if (!htab->use_blx && h->dynindx != -1
11619 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11620 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11622 struct elf_link_hash_entry * th;
11623 struct bfd_link_hash_entry * bh;
11624 struct elf_link_hash_entry * myh;
11628 /* Create a new symbol to register the real location of the function. */
11629 s = h->root.u.def.section;
11630 sprintf (name, "__real_%s", h->root.root.string);
11631 _bfd_generic_link_add_one_symbol (info, s->owner,
11632 name, BSF_GLOBAL, s,
11633 h->root.u.def.value,
11634 NULL, TRUE, FALSE, &bh);
11636 myh = (struct elf_link_hash_entry *) bh;
11637 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11638 myh->forced_local = 1;
11639 eh->export_glue = myh;
11640 th = record_arm_to_thumb_glue (info, h);
11641 /* Point the symbol at the stub. */
11642 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11643 h->root.u.def.section = th->root.u.def.section;
11644 h->root.u.def.value = th->root.u.def.value & ~1;
11647 if (eh->relocs_copied == NULL)
11650 /* In the shared -Bsymbolic case, discard space allocated for
11651 dynamic pc-relative relocs against symbols which turn out to be
11652 defined in regular objects. For the normal shared case, discard
11653 space for pc-relative relocs that have become local due to symbol
11654 visibility changes. */
11656 if (info->shared || htab->root.is_relocatable_executable)
11658 /* The only relocs that use pc_count are R_ARM_REL32 and
11659 R_ARM_REL32_NOI, which will appear on something like
11660 ".long foo - .". We want calls to protected symbols to resolve
11661 directly to the function rather than going via the plt. If people
11662 want function pointer comparisons to work as expected then they
11663 should avoid writing assembly like ".long foo - .". */
11664 if (SYMBOL_CALLS_LOCAL (info, h))
11666 struct elf32_arm_relocs_copied **pp;
11668 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11670 p->count -= p->pc_count;
11679 if (htab->vxworks_p)
11681 struct elf32_arm_relocs_copied **pp;
11683 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11685 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11692 /* Also discard relocs on undefined weak syms with non-default
11694 if (eh->relocs_copied != NULL
11695 && h->root.type == bfd_link_hash_undefweak)
11697 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11698 eh->relocs_copied = NULL;
11700 /* Make sure undefined weak symbols are output as a dynamic
11702 else if (h->dynindx == -1
11703 && !h->forced_local)
11705 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11710 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11711 && h->root.type == bfd_link_hash_new)
11713 /* Output absolute symbols so that we can create relocations
11714 against them. For normal symbols we output a relocation
11715 against the section that contains them. */
11716 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11723 /* For the non-shared case, discard space for relocs against
11724 symbols which turn out to need copy relocs or are not
11727 if (!h->non_got_ref
11728 && ((h->def_dynamic
11729 && !h->def_regular)
11730 || (htab->root.dynamic_sections_created
11731 && (h->root.type == bfd_link_hash_undefweak
11732 || h->root.type == bfd_link_hash_undefined))))
11734 /* Make sure this symbol is output as a dynamic symbol.
11735 Undefined weak syms won't yet be marked as dynamic. */
11736 if (h->dynindx == -1
11737 && !h->forced_local)
11739 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11743 /* If that succeeded, we know we'll be keeping all the
11745 if (h->dynindx != -1)
11749 eh->relocs_copied = NULL;
11754 /* Finally, allocate space. */
11755 for (p = eh->relocs_copied; p != NULL; p = p->next)
11757 asection *sreloc = elf_section_data (p->section)->sreloc;
11758 sreloc->size += p->count * RELOC_SIZE (htab);
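/* Illustrative sketch only, not used by the linker: the number of GOT bytes
   reserved earlier in this function for one symbol, as a function of its
   TLS usage.  The boolean parameters stand in for the GOT_TLS_GD,
   GOT_TLS_IE and GOT_NORMAL flags tested above.  */

static unsigned int
example_got_bytes_for_symbol (int uses_tls_gd, int uses_tls_ie, int uses_got)
{
  unsigned int bytes = 0;

  if (uses_got)
    bytes += 4;		/* Plain address-sized slot.  */
  if (uses_tls_gd)
    bytes += 8;		/* R_ARM_TLS_GD32: {module, offset} pair.  */
  if (uses_tls_ie)
    bytes += 4;		/* R_ARM_TLS_IE32: one tp-relative offset.  */
  return bytes;
}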
11764 /* Find any dynamic relocs that apply to read-only sections. */
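/* (Note: this helper is run over every global symbol via
   elf_link_hash_traverse from elf32_arm_size_dynamic_sections below; a hit
   on a SEC_READONLY section is what causes DT_TEXTREL to be emitted.)  */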
11767 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11769 struct elf32_arm_link_hash_entry * eh;
11770 struct elf32_arm_relocs_copied * p;
11772 if (h->root.type == bfd_link_hash_warning)
11773 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11775 eh = (struct elf32_arm_link_hash_entry *) h;
11776 for (p = eh->relocs_copied; p != NULL; p = p->next)
11778 asection *s = p->section;
11780 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11782 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11784 info->flags |= DF_TEXTREL;
11786 /* Not an error, just cut short the traversal. */
11794 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11797 struct elf32_arm_link_hash_table *globals;
11799 globals = elf32_arm_hash_table (info);
11800 if (globals == NULL)
11803 globals->byteswap_code = byteswap_code;
11806 /* Set the sizes of the dynamic sections. */
11809 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11810 struct bfd_link_info * info)
11815 bfd_boolean relocs;
11817 struct elf32_arm_link_hash_table *htab;
11819 htab = elf32_arm_hash_table (info);
11823 dynobj = elf_hash_table (info)->dynobj;
11824 BFD_ASSERT (dynobj != NULL);
11825 check_use_blx (htab);
11827 if (elf_hash_table (info)->dynamic_sections_created)
11829 /* Set the contents of the .interp section to the interpreter. */
11830 if (info->executable)
11832 s = bfd_get_section_by_name (dynobj, ".interp");
11833 BFD_ASSERT (s != NULL);
11834 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11835 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11839 /* Set up .got offsets for local syms, and space for local dynamic
11841 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11843 bfd_signed_vma *local_got;
11844 bfd_signed_vma *end_local_got;
11845 char *local_tls_type;
11846 bfd_size_type locsymcount;
11847 Elf_Internal_Shdr *symtab_hdr;
11849 bfd_boolean is_vxworks = htab->vxworks_p;
11851 if (! is_arm_elf (ibfd))
11854 for (s = ibfd->sections; s != NULL; s = s->next)
11856 struct elf32_arm_relocs_copied *p;
11858 for (p = (struct elf32_arm_relocs_copied *)
11859 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11861 if (!bfd_is_abs_section (p->section)
11862 && bfd_is_abs_section (p->section->output_section))
11864 /* Input section has been discarded, either because
11865 it is a copy of a linkonce section or due to
11866 linker script /DISCARD/, so we'll be discarding
11869 else if (is_vxworks
11870 && strcmp (p->section->output_section->name,
11873 /* Relocations in vxworks .tls_vars sections are
11874 handled specially by the loader. */
11876 else if (p->count != 0)
11878 srel = elf_section_data (p->section)->sreloc;
11879 srel->size += p->count * RELOC_SIZE (htab);
11880 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11881 info->flags |= DF_TEXTREL;
11886 local_got = elf_local_got_refcounts (ibfd);
11890 symtab_hdr = & elf_symtab_hdr (ibfd);
11891 locsymcount = symtab_hdr->sh_info;
11892 end_local_got = local_got + locsymcount;
11893 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11895 srel = htab->srelgot;
11896 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11898 if (*local_got > 0)
11900 *local_got = s->size;
11901 if (*local_tls_type & GOT_TLS_GD)
11902 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11904 if (*local_tls_type & GOT_TLS_IE)
11906 if (*local_tls_type == GOT_NORMAL)
11909 if (info->shared || *local_tls_type == GOT_TLS_GD)
11910 srel->size += RELOC_SIZE (htab);
11913 *local_got = (bfd_vma) -1;
11917 if (htab->tls_ldm_got.refcount > 0)
11919 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11920 for R_ARM_TLS_LDM32 relocations. */
11921 htab->tls_ldm_got.offset = htab->sgot->size;
11922 htab->sgot->size += 8;
11924 htab->srelgot->size += RELOC_SIZE (htab);
11927 htab->tls_ldm_got.offset = -1;
11929 /* Allocate global sym .plt and .got entries, and space for global
11930 sym dynamic relocs. */
11931 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11933 /* Here we rummage through the found bfds to collect glue information. */
11934 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11936 if (! is_arm_elf (ibfd))
11939 /* Initialise mapping tables for code/data. */
11940 bfd_elf32_arm_init_maps (ibfd);
11942 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11943 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11944 /* xgettext:c-format */
11945 _bfd_error_handler (_("Errors encountered processing file %s"),
11949 /* Allocate space for the glue sections now that we've sized them. */
11950 bfd_elf32_arm_allocate_interworking_sections (info);
11952 /* The check_relocs and adjust_dynamic_symbol entry points have
11953 determined the sizes of the various dynamic sections. Allocate
11954 memory for them. */
11957 for (s = dynobj->sections; s != NULL; s = s->next)
11961 if ((s->flags & SEC_LINKER_CREATED) == 0)
11964 /* It's OK to base decisions on the section name, because none
11965 of the dynobj section names depend upon the input files. */
11966 name = bfd_get_section_name (dynobj, s);
11968 if (strcmp (name, ".plt") == 0)
11970 /* Remember whether there is a PLT. */
11971 plt = s->size != 0;
11973 else if (CONST_STRNEQ (name, ".rel"))
11977 /* Remember whether there are any reloc sections other
11978 than .rel(a).plt and .rela.plt.unloaded. */
11979 if (s != htab->srelplt && s != htab->srelplt2)
11982 /* We use the reloc_count field as a counter if we need
11983 to copy relocs into the output file. */
11984 s->reloc_count = 0;
11987 else if (! CONST_STRNEQ (name, ".got")
11988 && strcmp (name, ".dynbss") != 0)
11990 /* It's not one of our sections, so don't allocate space. */
11996 /* If we don't need this section, strip it from the
11997 output file. This is mostly to handle .rel(a).bss and
11998 .rel(a).plt. We must create both sections in
11999 create_dynamic_sections, because they must be created
12000 before the linker maps input sections to output
12001 sections. The linker does that before
12002 adjust_dynamic_symbol is called, and it is that
12003 function which decides whether anything needs to go
12004 into these sections. */
12005 s->flags |= SEC_EXCLUDE;
12009 if ((s->flags & SEC_HAS_CONTENTS) == 0)
12012 /* Allocate memory for the section contents. */
12013 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
12014 if (s->contents == NULL)
12018 if (elf_hash_table (info)->dynamic_sections_created)
12020 /* Add some entries to the .dynamic section. We fill in the
12021 values later, in elf32_arm_finish_dynamic_sections, but we
12022 must add the entries now so that we get the correct size for
12023 the .dynamic section. The DT_DEBUG entry is filled in by the
12024 dynamic linker and used by the debugger. */
12025 #define add_dynamic_entry(TAG, VAL) \
12026 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
12028 if (info->executable)
12030 if (!add_dynamic_entry (DT_DEBUG, 0))
12036 if ( !add_dynamic_entry (DT_PLTGOT, 0)
12037 || !add_dynamic_entry (DT_PLTRELSZ, 0)
12038 || !add_dynamic_entry (DT_PLTREL,
12039 htab->use_rel ? DT_REL : DT_RELA)
12040 || !add_dynamic_entry (DT_JMPREL, 0))
12048 if (!add_dynamic_entry (DT_REL, 0)
12049 || !add_dynamic_entry (DT_RELSZ, 0)
12050 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
12055 if (!add_dynamic_entry (DT_RELA, 0)
12056 || !add_dynamic_entry (DT_RELASZ, 0)
12057 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
12062 /* If any dynamic relocs apply to a read-only section,
12063 then we need a DT_TEXTREL entry. */
12064 if ((info->flags & DF_TEXTREL) == 0)
12065 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
12068 if ((info->flags & DF_TEXTREL) != 0)
12070 if (!add_dynamic_entry (DT_TEXTREL, 0))
12073 if (htab->vxworks_p
12074 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
12077 #undef add_dynamic_entry
12082 /* Finish up dynamic symbol handling. We set the contents of various
12083 dynamic sections here. */
12086 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
12087 struct bfd_link_info * info,
12088 struct elf_link_hash_entry * h,
12089 Elf_Internal_Sym * sym)
12092 struct elf32_arm_link_hash_table *htab;
12093 struct elf32_arm_link_hash_entry *eh;
12095 dynobj = elf_hash_table (info)->dynobj;
12096 htab = elf32_arm_hash_table (info);
12100 eh = (struct elf32_arm_link_hash_entry *) h;
12102 if (h->plt.offset != (bfd_vma) -1)
12108 Elf_Internal_Rela rel;
12110 /* This symbol has an entry in the procedure linkage table. Set
12113 BFD_ASSERT (h->dynindx != -1);
12115 splt = bfd_get_section_by_name (dynobj, ".plt");
12116 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
12117 BFD_ASSERT (splt != NULL && srel != NULL);
12119 /* Fill in the entry in the procedure linkage table. */
12120 if (htab->symbian_p)
12122 put_arm_insn (htab, output_bfd,
12123 elf32_arm_symbian_plt_entry[0],
12124 splt->contents + h->plt.offset);
12125 bfd_put_32 (output_bfd,
12126 elf32_arm_symbian_plt_entry[1],
12127 splt->contents + h->plt.offset + 4);
12129 /* Fill in the entry in the .rel.plt section. */
12130 rel.r_offset = (splt->output_section->vma
12131 + splt->output_offset
12132 + h->plt.offset + 4);
12133 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12135 /* Get the index in the procedure linkage table which
12136 corresponds to this symbol. This is the index of this symbol
12137 in all the symbols for which we are making plt entries. The
12138 first entry in the procedure linkage table is reserved. */
12139 plt_index = ((h->plt.offset - htab->plt_header_size)
12140 / htab->plt_entry_size);
12144 bfd_vma got_offset, got_address, plt_address;
12145 bfd_vma got_displacement;
12149 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12150 BFD_ASSERT (sgot != NULL);
12152 /* Get the offset into the .got.plt table of the entry that
12153 corresponds to this function. */
12154 got_offset = eh->plt_got_offset;
12156 /* Get the index in the procedure linkage table which
12157 corresponds to this symbol. This is the index of this symbol
12158 in all the symbols for which we are making plt entries. The
12159 first three entries in .got.plt are reserved; after that
12160 symbols appear in the same order as in .plt. */
12161 plt_index = (got_offset - 12) / 4;
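/* For example, the first PLT'd symbol has got_offset == 12 and hence
   plt_index == 0; the next has got_offset == 16 and plt_index == 1.  */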
12163 /* Calculate the address of the GOT entry. */
12164 got_address = (sgot->output_section->vma
12165 + sgot->output_offset
12168 /* ...and the address of the PLT entry. */
12169 plt_address = (splt->output_section->vma
12170 + splt->output_offset
12173 ptr = htab->splt->contents + h->plt.offset;
12174 if (htab->vxworks_p && info->shared)
12179 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12181 val = elf32_arm_vxworks_shared_plt_entry[i];
12183 val |= got_address - sgot->output_section->vma;
12185 val |= plt_index * RELOC_SIZE (htab);
12186 if (i == 2 || i == 5)
12187 bfd_put_32 (output_bfd, val, ptr);
12189 put_arm_insn (htab, output_bfd, val, ptr);
12192 else if (htab->vxworks_p)
12197 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12199 val = elf32_arm_vxworks_exec_plt_entry[i];
12201 val |= got_address;
12203 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12205 val |= plt_index * RELOC_SIZE (htab);
12206 if (i == 2 || i == 5)
12207 bfd_put_32 (output_bfd, val, ptr);
12209 put_arm_insn (htab, output_bfd, val, ptr);
12212 loc = (htab->srelplt2->contents
12213 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12215 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12216 referencing the GOT for this PLT entry. */
12217 rel.r_offset = plt_address + 8;
12218 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12219 rel.r_addend = got_offset;
12220 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12221 loc += RELOC_SIZE (htab);
12223 /* Create the R_ARM_ABS32 relocation referencing the
12224 beginning of the PLT for this GOT entry. */
12225 rel.r_offset = got_address;
12226 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12228 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12232 bfd_signed_vma thumb_refs;
12233 /* Calculate the displacement between the PLT slot and the
12234 entry in the GOT. The eight-byte offset accounts for the
12235 value produced by adding to pc in the first instruction
12236 of the PLT stub. */
12237 got_displacement = got_address - (plt_address + 8);
12239 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
12241 thumb_refs = eh->plt_thumb_refcount;
12242 if (!htab->use_blx)
12243 thumb_refs += eh->plt_maybe_thumb_refcount;
12245 if (thumb_refs > 0)
12247 put_thumb_insn (htab, output_bfd,
12248 elf32_arm_plt_thumb_stub[0], ptr - 4);
12249 put_thumb_insn (htab, output_bfd,
12250 elf32_arm_plt_thumb_stub[1], ptr - 2);
12253 put_arm_insn (htab, output_bfd,
12254 elf32_arm_plt_entry[0]
12255 | ((got_displacement & 0x0ff00000) >> 20),
12257 put_arm_insn (htab, output_bfd,
12258 elf32_arm_plt_entry[1]
12259 | ((got_displacement & 0x000ff000) >> 12),
12261 put_arm_insn (htab, output_bfd,
12262 elf32_arm_plt_entry[2]
12263 | (got_displacement & 0x00000fff),
12265 #ifdef FOUR_WORD_PLT
12266 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12270 /* Fill in the entry in the global offset table. */
12271 bfd_put_32 (output_bfd,
12272 (splt->output_section->vma
12273 + splt->output_offset),
12274 sgot->contents + got_offset);
12276 /* Fill in the entry in the .rel(a).plt section. */
12278 rel.r_offset = got_address;
12279 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12282 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12283 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12285 if (!h->def_regular)
12287 /* Mark the symbol as undefined, rather than as defined in
12288 the .plt section. Leave the value alone. */
12289 sym->st_shndx = SHN_UNDEF;
12290 /* If the symbol is weak, we do need to clear the value.
12291 Otherwise, the PLT entry would provide a definition for
12292 the symbol even if the symbol wasn't defined anywhere,
12293 and so the symbol would never be NULL. */
12294 if (!h->ref_regular_nonweak)
12299 if (h->got.offset != (bfd_vma) -1
12300 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12301 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12305 Elf_Internal_Rela rel;
12309 /* This symbol has an entry in the global offset table. Set it
12311 sgot = bfd_get_section_by_name (dynobj, ".got");
12312 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12313 BFD_ASSERT (sgot != NULL && srel != NULL);
12315 offset = (h->got.offset & ~(bfd_vma) 1);
12317 rel.r_offset = (sgot->output_section->vma
12318 + sgot->output_offset
12321 /* If this is a static link, or it is a -Bsymbolic link and the
12322 symbol is defined locally or was forced to be local because
12323 of a version file, we just want to emit a RELATIVE reloc.
12324 The entry in the global offset table will already have been
12325 initialized in the relocate_section function. */
12327 && SYMBOL_REFERENCES_LOCAL (info, h))
12329 BFD_ASSERT ((h->got.offset & 1) != 0);
12330 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12331 if (!htab->use_rel)
12333 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12334 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12339 BFD_ASSERT ((h->got.offset & 1) == 0);
12340 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12341 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12344 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12345 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12351 Elf_Internal_Rela rel;
12354 /* This symbol needs a copy reloc. Set it up. */
12355 BFD_ASSERT (h->dynindx != -1
12356 && (h->root.type == bfd_link_hash_defined
12357 || h->root.type == bfd_link_hash_defweak));
12359 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12360 RELOC_SECTION (htab, ".bss"));
12361 BFD_ASSERT (s != NULL);
12364 rel.r_offset = (h->root.u.def.value
12365 + h->root.u.def.section->output_section->vma
12366 + h->root.u.def.section->output_offset);
12367 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12368 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12369 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12372 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12373 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12374 to the ".got" section. */
12375 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12376 || (!htab->vxworks_p && h == htab->root.hgot))
12377 sym->st_shndx = SHN_ABS;
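/* Illustrative sketch only, never emitted into an output file: how the GOT
   displacement computed above is folded into the three instruction words
   of a standard (non-VxWorks, non-Symbian) ARM PLT entry.  TEMPLATE_WORDS
   stands for elf32_arm_plt_entry; the 8/8/12-bit field split matches the
   masks used in elf32_arm_finish_dynamic_symbol above.  */

static void
example_encode_arm_plt_entry (const unsigned long template_words[3],
			      unsigned long got_displacement,
			      unsigned long out[3])
{
  out[0] = template_words[0] | ((got_displacement & 0x0ff00000) >> 20);
  out[1] = template_words[1] | ((got_displacement & 0x000ff000) >> 12);
  out[2] = template_words[2] | (got_displacement & 0x00000fff);
}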
12382 /* Finish up the dynamic sections. */
12385 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12390 struct elf32_arm_link_hash_table *htab;
12392 htab = elf32_arm_hash_table (info);
12396 dynobj = elf_hash_table (info)->dynobj;
12398 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12399 BFD_ASSERT (htab->symbian_p || sgot != NULL);
12400 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12402 if (elf_hash_table (info)->dynamic_sections_created)
12405 Elf32_External_Dyn *dyncon, *dynconend;
12407 splt = bfd_get_section_by_name (dynobj, ".plt");
12408 BFD_ASSERT (splt != NULL && sdyn != NULL);
12410 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12411 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12413 for (; dyncon < dynconend; dyncon++)
12415 Elf_Internal_Dyn dyn;
12419 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12426 if (htab->vxworks_p
12427 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12428 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12433 goto get_vma_if_bpabi;
12436 goto get_vma_if_bpabi;
12439 goto get_vma_if_bpabi;
12441 name = ".gnu.version";
12442 goto get_vma_if_bpabi;
12444 name = ".gnu.version_d";
12445 goto get_vma_if_bpabi;
12447 name = ".gnu.version_r";
12448 goto get_vma_if_bpabi;
12454 name = RELOC_SECTION (htab, ".plt");
12456 s = bfd_get_section_by_name (output_bfd, name);
12457 BFD_ASSERT (s != NULL);
12458 if (!htab->symbian_p)
12459 dyn.d_un.d_ptr = s->vma;
12461 /* In the BPABI, tags in the PT_DYNAMIC section point
12462 at the file offset, not the memory address, for the
12463 convenience of the post linker. */
12464 dyn.d_un.d_ptr = s->filepos;
12465 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12469 if (htab->symbian_p)
12474 s = bfd_get_section_by_name (output_bfd,
12475 RELOC_SECTION (htab, ".plt"));
12476 BFD_ASSERT (s != NULL);
12477 dyn.d_un.d_val = s->size;
12478 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12483 if (!htab->symbian_p)
12485 /* My reading of the SVR4 ABI indicates that the
12486 procedure linkage table relocs (DT_JMPREL) should be
12487 included in the overall relocs (DT_REL). This is
12488 what Solaris does. However, UnixWare can not handle
12489 that case. Therefore, we override the DT_RELSZ entry
12490 here to make it not include the JMPREL relocs. Since
12491 the linker script arranges for .rel(a).plt to follow all
12492 other relocation sections, we don't have to worry
12493 about changing the DT_REL entry. */
12494 s = bfd_get_section_by_name (output_bfd,
12495 RELOC_SECTION (htab, ".plt"));
12497 dyn.d_un.d_val -= s->size;
12498 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12501 /* Fall through. */
12505 /* In the BPABI, the DT_REL tag must point at the file
12506 offset, not the VMA, of the first relocation
12507 section. So, we use code similar to that in
12508 elflink.c, but do not check for SHF_ALLOC on the
12509 relocation section, since relocation sections are
12510 never allocated under the BPABI. The comments above
12511 about UnixWare notwithstanding, we include all of the
12512 relocations here. */
12513 if (htab->symbian_p)
12516 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12517 ? SHT_REL : SHT_RELA);
12518 dyn.d_un.d_val = 0;
12519 for (i = 1; i < elf_numsections (output_bfd); i++)
12521 Elf_Internal_Shdr *hdr
12522 = elf_elfsections (output_bfd)[i];
12523 if (hdr->sh_type == type)
12525 if (dyn.d_tag == DT_RELSZ
12526 || dyn.d_tag == DT_RELASZ)
12527 dyn.d_un.d_val += hdr->sh_size;
12528 else if ((ufile_ptr) hdr->sh_offset
12529 <= dyn.d_un.d_val - 1)
12530 dyn.d_un.d_val = hdr->sh_offset;
12533 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12537 /* Set the bottom bit of DT_INIT/FINI if the
12538 corresponding function is Thumb. */
12540 name = info->init_function;
12543 name = info->fini_function;
12545 /* If it wasn't set by elf_bfd_final_link
12546 then there is nothing to adjust. */
12547 if (dyn.d_un.d_val != 0)
12549 struct elf_link_hash_entry * eh;
12551 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12552 FALSE, FALSE, TRUE);
12554 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12556 dyn.d_un.d_val |= 1;
12557 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12564 /* Fill in the first entry in the procedure linkage table. */
12565 if (splt->size > 0 && htab->plt_header_size)
12567 const bfd_vma *plt0_entry;
12568 bfd_vma got_address, plt_address, got_displacement;
12570 /* Calculate the addresses of the GOT and PLT. */
12571 got_address = sgot->output_section->vma + sgot->output_offset;
12572 plt_address = splt->output_section->vma + splt->output_offset;
12574 if (htab->vxworks_p)
12576 /* The VxWorks GOT is relocated by the dynamic linker.
12577 Therefore, we must emit relocations rather than simply
12578 computing the values now. */
12579 Elf_Internal_Rela rel;
12581 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12582 put_arm_insn (htab, output_bfd, plt0_entry[0],
12583 splt->contents + 0);
12584 put_arm_insn (htab, output_bfd, plt0_entry[1],
12585 splt->contents + 4);
12586 put_arm_insn (htab, output_bfd, plt0_entry[2],
12587 splt->contents + 8);
12588 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12590 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12591 rel.r_offset = plt_address + 12;
12592 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12594 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12595 htab->srelplt2->contents);
12599 got_displacement = got_address - (plt_address + 16);
12601 plt0_entry = elf32_arm_plt0_entry;
12602 put_arm_insn (htab, output_bfd, plt0_entry[0],
12603 splt->contents + 0);
12604 put_arm_insn (htab, output_bfd, plt0_entry[1],
12605 splt->contents + 4);
12606 put_arm_insn (htab, output_bfd, plt0_entry[2],
12607 splt->contents + 8);
12608 put_arm_insn (htab, output_bfd, plt0_entry[3],
12609 splt->contents + 12);
12611 #ifdef FOUR_WORD_PLT
12612 /* The displacement value goes in the otherwise-unused
12613 last word of the second entry. */
12614 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12616 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12621 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12622 really seem like the right value. */
12623 if (splt->output_section->owner == output_bfd)
12624 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12626 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12628 /* Correct the .rel(a).plt.unloaded relocations. They will have
12629 incorrect symbol indexes. */
12633 num_plts = ((htab->splt->size - htab->plt_header_size)
12634 / htab->plt_entry_size);
12635 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12637 for (; num_plts; num_plts--)
12639 Elf_Internal_Rela rel;
12641 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12642 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12643 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12644 p += RELOC_SIZE (htab);
12646 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12647 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12648 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12649 p += RELOC_SIZE (htab);
12654 /* Fill in the first three entries in the global offset table. */
12657 if (sgot->size > 0)
12660 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12662 bfd_put_32 (output_bfd,
12663 sdyn->output_section->vma + sdyn->output_offset,
12665 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12666 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12669 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12676 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12678 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12679 struct elf32_arm_link_hash_table *globals;
12681 i_ehdrp = elf_elfheader (abfd);
12683 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12684 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12686 i_ehdrp->e_ident[EI_OSABI] = 0;
12687 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12691 globals = elf32_arm_hash_table (link_info);
12692 if (globals != NULL && globals->byteswap_code)
12693 i_ehdrp->e_flags |= EF_ARM_BE8;
12697 static enum elf_reloc_type_class
12698 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12700 switch ((int) ELF32_R_TYPE (rela->r_info))
12702 case R_ARM_RELATIVE:
12703 return reloc_class_relative;
12704 case R_ARM_JUMP_SLOT:
12705 return reloc_class_plt;
12707 return reloc_class_copy;
12709 return reloc_class_normal;
12713 /* Set the right machine number for an Arm ELF file. */
12716 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12718 if (hdr->sh_type == SHT_NOTE)
12719 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12725 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12727 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12730 /* Return TRUE if this is an unwinding table entry. */
12733 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12735 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12736 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12740 /* Set the type and flags for an ARM section. We do this by
12741 the section name, which is a hack, but ought to work. */
12744 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12748 name = bfd_get_section_name (abfd, sec);
12750 if (is_arm_elf_unwind_section_name (abfd, name))
12752 hdr->sh_type = SHT_ARM_EXIDX;
12753 hdr->sh_flags |= SHF_LINK_ORDER;
12758 /* Handle an ARM specific section when reading an object file. This is
12759 called when bfd_section_from_shdr finds a section with an unknown
12763 elf32_arm_section_from_shdr (bfd *abfd,
12764 Elf_Internal_Shdr * hdr,
12768 /* There ought to be a place to keep ELF backend specific flags, but
12769 at the moment there isn't one. We just keep track of the
12770 sections by their name, instead. Fortunately, the ABI gives
12771 names for all the ARM specific sections, so we will probably get
12773 switch (hdr->sh_type)
12775 case SHT_ARM_EXIDX:
12776 case SHT_ARM_PREEMPTMAP:
12777 case SHT_ARM_ATTRIBUTES:
12784 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12790 static _arm_elf_section_data *
12791 get_arm_elf_section_data (asection * sec)
12793 if (sec && sec->owner && is_arm_elf (sec->owner))
12794 return elf32_arm_section_data (sec);
12802 struct bfd_link_info *info;
12805 int (*func) (void *, const char *, Elf_Internal_Sym *,
12806 asection *, struct elf_link_hash_entry *);
12807 } output_arch_syminfo;
12809 enum map_symbol_type
12817 /* Output a single mapping symbol. */
12820 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12821 enum map_symbol_type type,
12824 static const char *names[3] = {"$a", "$t", "$d"};
12825 Elf_Internal_Sym sym;
12827 sym.st_value = osi->sec->output_section->vma
12828 + osi->sec->output_offset
12832 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12833 sym.st_shndx = osi->sec_shndx;
12834 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
12835 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
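/* (For instance, a call with TYPE == ARM_MAP_THUMB emits a local "$t"
   symbol at the given offset and records the single character 't' in the
   section's mapping table via elf32_arm_section_map_add above.)  */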
12839 /* Output mapping symbols for PLT entries associated with H. */
12842 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12844 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12845 struct elf32_arm_link_hash_table *htab;
12846 struct elf32_arm_link_hash_entry *eh;
12849 if (h->root.type == bfd_link_hash_indirect)
12852 if (h->root.type == bfd_link_hash_warning)
12853 /* When warning symbols are created, they **replace** the "real"
12854 entry in the hash table, thus we never get to see the real
12855 symbol in a hash traversal. So look at it now. */
12856 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12858 if (h->plt.offset == (bfd_vma) -1)
12861 htab = elf32_arm_hash_table (osi->info);
12865 eh = (struct elf32_arm_link_hash_entry *) h;
12866 addr = h->plt.offset;
12867 if (htab->symbian_p)
12869 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12871 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12874 else if (htab->vxworks_p)
12876 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12878 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12880 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12882 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12887 bfd_signed_vma thumb_refs;
12889 thumb_refs = eh->plt_thumb_refcount;
12890 if (!htab->use_blx)
12891 thumb_refs += eh->plt_maybe_thumb_refcount;
12893 if (thumb_refs > 0)
12895 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12898 #ifdef FOUR_WORD_PLT
12899 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12901 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12904 /* A three-word PLT with no Thumb thunk contains only ARM code,
12905 so we only need to output a mapping symbol for the first PLT entry
12906 and for entries with Thumb thunks. */
12907 if (thumb_refs > 0 || addr == 20)
12909 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12918 /* Output a single local symbol for a generated stub. */
12921 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12922 bfd_vma offset, bfd_vma size)
12924 Elf_Internal_Sym sym;
12926 sym.st_value = osi->sec->output_section->vma
12927 + osi->sec->output_offset
12929 sym.st_size = size;
12931 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12932 sym.st_shndx = osi->sec_shndx;
12933 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12937 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12940 struct elf32_arm_stub_hash_entry *stub_entry;
12941 struct bfd_link_info *info;
12942 asection *stub_sec;
12945 output_arch_syminfo *osi;
12946 const insn_sequence *template_sequence;
12947 enum stub_insn_type prev_type;
12950 enum map_symbol_type sym_type;
12952 /* Massage our args to the form they really have. */
12953 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12954 osi = (output_arch_syminfo *) in_arg;
12958 stub_sec = stub_entry->stub_sec;
12960 /* Ensure this stub is attached to the current section being
12962 if (stub_sec != osi->sec)
12965 addr = (bfd_vma) stub_entry->stub_offset;
12966 stub_name = stub_entry->output_name;
12968 template_sequence = stub_entry->stub_template;
12969 switch (template_sequence[0].type)
12972 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12977 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12978 stub_entry->stub_size))
12986 prev_type = DATA_TYPE;
12988 for (i = 0; i < stub_entry->stub_template_size; i++)
12990 switch (template_sequence[i].type)
12993 sym_type = ARM_MAP_ARM;
12998 sym_type = ARM_MAP_THUMB;
13002 sym_type = ARM_MAP_DATA;
13010 if (template_sequence[i].type != prev_type)
13012 prev_type = template_sequence[i].type;
13013 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
13017 switch (template_sequence[i].type)
13041 /* Output mapping symbols for linker generated sections,
13042 and for those data-only sections that do not have a
13046 elf32_arm_output_arch_local_syms (bfd *output_bfd,
13047 struct bfd_link_info *info,
13049 int (*func) (void *, const char *,
13050 Elf_Internal_Sym *,
13052 struct elf_link_hash_entry *))
13054 output_arch_syminfo osi;
13055 struct elf32_arm_link_hash_table *htab;
13057 bfd_size_type size;
13060 htab = elf32_arm_hash_table (info);
13064 check_use_blx (htab);
13070 /* Add a $d mapping symbol to data-only sections that
13071 don't have any mapping symbol. This may result in (harmless) redundant
13072 mapping symbols. */
13073 for (input_bfd = info->input_bfds;
13075 input_bfd = input_bfd->link_next)
13077 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
13078 for (osi.sec = input_bfd->sections;
13080 osi.sec = osi.sec->next)
13082 if (osi.sec->output_section != NULL
13083 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
13085 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
13086 == SEC_HAS_CONTENTS
13087 && get_arm_elf_section_data (osi.sec) != NULL
13088 && get_arm_elf_section_data (osi.sec)->mapcount == 0
13089 && osi.sec->size > 0)
13091 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13092 (output_bfd, osi.sec->output_section);
13093 if (osi.sec_shndx != (int)SHN_BAD)
13094 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
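/* (A single "$d" at offset 0 is sufficient: a mapping symbol remains in
   effect until the next one, so it marks the whole section as data.)  */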
13099 /* ARM->Thumb glue. */
13100 if (htab->arm_glue_size > 0)
13102 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13103 ARM2THUMB_GLUE_SECTION_NAME);
13105 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13106 (output_bfd, osi.sec->output_section);
13107 if (info->shared || htab->root.is_relocatable_executable
13108 || htab->pic_veneer)
13109 size = ARM2THUMB_PIC_GLUE_SIZE;
13110 else if (htab->use_blx)
13111 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13113 size = ARM2THUMB_STATIC_GLUE_SIZE;
13115 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13117 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13118 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13122 /* Thumb->ARM glue. */
13123 if (htab->thumb_glue_size > 0)
13125 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13126 THUMB2ARM_GLUE_SECTION_NAME);
13128 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13129 (output_bfd, osi.sec->output_section);
13130 size = THUMB2ARM_GLUE_SIZE;
13132 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13134 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13135 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
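/* (Each Thumb->ARM veneer is eight bytes: four bytes of Thumb code
   followed by four bytes of ARM code, hence the "$t" at the start and
   the "$a" at offset + 4.)  */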
13139 /* ARMv4 BX veneers. */
13140 if (htab->bx_glue_size > 0)
13142 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13143 ARM_BX_GLUE_SECTION_NAME);
13145 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13146 (output_bfd, osi.sec->output_section);
13148 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13151 /* Long calls stubs. */
13152 if (htab->stub_bfd && htab->stub_bfd->sections)
13154 asection* stub_sec;
13156 for (stub_sec = htab->stub_bfd->sections;
13158 stub_sec = stub_sec->next)
13160 /* Ignore non-stub sections. */
13161 if (!strstr (stub_sec->name, STUB_SUFFIX))
13164 osi.sec = stub_sec;
13166 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13167 (output_bfd, osi.sec->output_section);
13169 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13173 /* Finally, output mapping symbols for the PLT. */
13174 if (!htab->splt || htab->splt->size == 0)
13177 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13178 htab->splt->output_section);
13179 osi.sec = htab->splt;
13180 /* Output mapping symbols for the plt header. SymbianOS does not have a
13182 if (htab->vxworks_p)
13184 /* VxWorks shared libraries have no PLT header. */
13187 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13189 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13193 else if (!htab->symbian_p)
13195 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13197 #ifndef FOUR_WORD_PLT
13198 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13203 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13207 /* Allocate target specific section data. */
13210 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13212 if (!sec->used_by_bfd)
13214 _arm_elf_section_data *sdata;
13215 bfd_size_type amt = sizeof (*sdata);
13217 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13220 sec->used_by_bfd = sdata;
13223 return _bfd_elf_new_section_hook (abfd, sec);
13227 /* Used to order a list of mapping symbols by address. */
13230 elf32_arm_compare_mapping (const void * a, const void * b)
13232 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13233 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13235 if (amap->vma > bmap->vma)
13237 else if (amap->vma < bmap->vma)
13239 else if (amap->type > bmap->type)
13240 /* Ensure results do not depend on the host qsort for objects with
13241 multiple mapping symbols at the same address by sorting on type
13244 else if (amap->type < bmap->type)
13250 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13252 static unsigned long
13253 offset_prel31 (unsigned long addr, bfd_vma offset)
13255 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
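/* (Worked example: offset_prel31 (0x7ffffffc, 0x8) yields 0x00000004 --
   the 31-bit field simply wraps -- while offset_prel31 (0x80000001, 0x4)
   yields 0x80000005, because bit 31 of ADDR is preserved unchanged.)  */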
13258 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13262 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13264 unsigned long first_word = bfd_get_32 (output_bfd, from);
13265 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13267 /* High bit of first word is supposed to be zero. */
13268 if ((first_word & 0x80000000ul) == 0)
13269 first_word = offset_prel31 (first_word, offset);
13271 /* If the high bit of the second word is clear, and its value is not 0x1
13272 (EXIDX_CANTUNWIND), it is a PREL31 offset to an .ARM.extab entry. */
13273 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13274 second_word = offset_prel31 (second_word, offset);
13276 bfd_put_32 (output_bfd, first_word, to);
13277 bfd_put_32 (output_bfd, second_word, to + 4);
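/* (Background note: each .ARM.exidx entry is a pair of 32-bit words.  The
   first is always a PREL31 offset to the code it describes; the second is
   either EXIDX_CANTUNWIND (0x1), an inline unwind descriptor with bit 31
   set, or a PREL31 offset into .ARM.extab -- only that last form needs
   adjusting here.)  */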
13280 /* Data for make_branch_to_a8_stub(). */
13282 struct a8_branch_to_stub_data {
13283 asection *writing_section;
13284 bfd_byte *contents;
13288 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13289 places for a particular section. */
13292 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13295 struct elf32_arm_stub_hash_entry *stub_entry;
13296 struct a8_branch_to_stub_data *data;
13297 bfd_byte *contents;
13298 unsigned long branch_insn;
13299 bfd_vma veneered_insn_loc, veneer_entry_loc;
13300 bfd_signed_vma branch_offset;
13302 unsigned int target;
13304 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13305 data = (struct a8_branch_to_stub_data *) in_arg;
13307 if (stub_entry->target_section != data->writing_section
13308 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13311 contents = data->contents;
13313 veneered_insn_loc = stub_entry->target_section->output_section->vma
13314 + stub_entry->target_section->output_offset
13315 + stub_entry->target_value;
13317 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13318 + stub_entry->stub_sec->output_offset
13319 + stub_entry->stub_offset;
13321 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13322 veneered_insn_loc &= ~3u;
13324 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13326 abfd = stub_entry->target_section->owner;
13327 target = stub_entry->target_value;
13329 /* We attempt to avoid this condition by setting stubs_always_after_branch
13330 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13331 This check is just to be on the safe side... */
13332 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13334 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13335 "allocated in unsafe location"), abfd);
13339 switch (stub_entry->stub_type)
13341 case arm_stub_a8_veneer_b:
13342 case arm_stub_a8_veneer_b_cond:
13343 branch_insn = 0xf0009000;
13346 case arm_stub_a8_veneer_blx:
13347 branch_insn = 0xf000e800;
13350 case arm_stub_a8_veneer_bl:
13352 unsigned int i1, j1, i2, j2, s;
13354 branch_insn = 0xf000d000;
13357 if (branch_offset < -16777216 || branch_offset > 16777214)
13359 /* There's not much we can do apart from complain if this
13361 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13362 "of range (input file too large)"), abfd);
13366 /* i1 = not(j1 eor s), so:
13368 j1 = (not i1) eor s. */
13370 branch_insn |= (branch_offset >> 1) & 0x7ff;
13371 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13372 i2 = (branch_offset >> 22) & 1;
13373 i1 = (branch_offset >> 23) & 1;
13374 s = (branch_offset >> 24) & 1;
13377 branch_insn |= j2 << 11;
13378 branch_insn |= j1 << 13;
13379 branch_insn |= s << 26;
13388 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
13389 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
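/* Illustrative sketch only, not used by the linker: how a signed byte
   offset is packed into the Thumb-2 B/BL immediate fields, mirroring the
   bit manipulation above.  The helper name is ours; OPCODE stands for the
   0xf000xxxx skeleton chosen in the switch above, and OFFSET must already
   be even and range-checked to roughly +/-16MB as done above.  */

static unsigned long
example_encode_thumb2_branch (unsigned long opcode, long offset)
{
  unsigned long s  = (offset >> 24) & 1;
  unsigned long i1 = (offset >> 23) & 1;
  unsigned long i2 = (offset >> 22) & 1;
  unsigned long j1 = (!i1) ^ s;		/* From i1 = not (j1 eor s).  */
  unsigned long j2 = (!i2) ^ s;
  unsigned long insn = opcode;

  insn |= (offset >> 1) & 0x7ff;		/* imm11, second halfword.  */
  insn |= ((offset >> 12) & 0x3ff) << 16;	/* imm10, first halfword.  */
  insn |= j2 << 11;
  insn |= j1 << 13;
  insn |= s << 26;
  return insn;
}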
/* Do code byteswapping.  Return FALSE afterwards so that the section is
   written out as normal.  */

static bfd_boolean
elf32_arm_write_section (bfd *output_bfd,
                         struct bfd_link_info *link_info,
                         asection *sec,
                         bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return FALSE;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return FALSE;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  if (errcount != 0)
    {
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
           errnode = errnode->next)
        {
          bfd_vma target = errnode->vma - offset;

          switch (errnode->type)
            {
            case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
              {
                bfd_vma branch_to_veneer;
                /* Original condition code of instruction, plus bit mask for
                   ARM B instruction.  */
                unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
                                    | 0x0a000000;

                /* The instruction is before the label.  */
                target -= 4;

                /* Above offset included in -4 below.  */
                branch_to_veneer = errnode->u.b.veneer->vma
                                   - errnode->vma - 4;

                if ((signed) branch_to_veneer < -(1 << 25)
                    || (signed) branch_to_veneer >= (1 << 25))
                  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
                                           "range"), output_bfd);

                insn |= (branch_to_veneer >> 2) & 0xffffff;
                contents[endianflip ^ target] = insn & 0xff;
                contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
                contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
                contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
              }
              break;

            case VFP11_ERRATUM_ARM_VENEER:
              {
                bfd_vma branch_from_veneer;
                unsigned int insn;

                /* Take size of veneer into account.  */
                branch_from_veneer = errnode->u.v.branch->vma
                                     - errnode->vma - 12;

                if ((signed) branch_from_veneer < -(1 << 25)
                    || (signed) branch_from_veneer >= (1 << 25))
                  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
                                           "range"), output_bfd);

                /* Original instruction.  */
                insn = errnode->u.v.branch->u.b.vfp_insn;
                contents[endianflip ^ target] = insn & 0xff;
                contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
                contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
                contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

                /* Branch back to insn after original insn.  */
                insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
                contents[endianflip ^ (target + 4)] = insn & 0xff;
                contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
                contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
                contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
              }
              break;

            default:
              abort ();
            }
        }
    }

  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
        = arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
         size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
         markers) was sec->rawsize.  (This isn't the case if we perform no
         edits; then rawsize will be zero and we should use size.)  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
        {
          if (edit_node)
            {
              unsigned int edit_index = edit_node->index;

              if (in_index < edit_index && in_index * 8 < input_size)
                {
                  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
                                    contents + in_index * 8, add_to_offsets);
                  out_index++;
                  in_index++;
                }
              else if (in_index == edit_index
                       || (in_index * 8 >= input_size
                           && edit_index == UINT_MAX))
                {
                  switch (edit_node->type)
                    {
                    case DELETE_EXIDX_ENTRY:
                      in_index++;
                      add_to_offsets += 8;
                      break;

                    case INSERT_EXIDX_CANTUNWIND_AT_END:
                      {
                        asection *text_sec = edit_node->linked_section;
                        bfd_vma text_offset = text_sec->output_section->vma
                                              + text_sec->output_offset
                                              + text_sec->size;
                        bfd_vma exidx_offset = offset + out_index * 8;
                        unsigned long prel31_offset;

                        /* Note: this is meant to be equivalent to an
                           R_ARM_PREL31 relocation.  These synthetic
                           EXIDX_CANTUNWIND markers are not relocated by the
                           usual BFD method.  */
                        prel31_offset = (text_offset - exidx_offset)
                                        & 0x7ffffffful;

                        /* First address we can't unwind.  */
                        bfd_put_32 (output_bfd, prel31_offset,
                                    &edited_contents[out_index * 8]);

                        /* Code for EXIDX_CANTUNWIND.  */
                        bfd_put_32 (output_bfd, 0x1,
                                    &edited_contents[out_index * 8 + 4]);

                        out_index++;
                        add_to_offsets -= 8;
                      }
                      break;
                    }

                  edit_node = edit_node->next;
                }
            }
          else
            {
              /* No more edits, copy remaining entries verbatim.  */
              copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
                                contents + in_index * 8, add_to_offsets);
              out_index++;
              in_index++;
            }
        }

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
        bfd_set_section_contents (output_bfd, sec->output_section,
                                  edited_contents,
                                  (file_ptr) sec->output_offset, sec->size);

      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
                         &data);
    }

  if (mapcount == 0)
    return FALSE;

  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
        {
          if (i == mapcount - 1)
            end = sec->size;
          else
            end = map[i + 1].vma;

          switch (map[i].type)
            {
            case 'a':
              /* Byte swap code words.  */
              while (ptr + 3 < end)
                {
                  tmp = contents[ptr];
                  contents[ptr] = contents[ptr + 3];
                  contents[ptr + 3] = tmp;
                  tmp = contents[ptr + 1];
                  contents[ptr + 1] = contents[ptr + 2];
                  contents[ptr + 2] = tmp;
                  ptr += 4;
                }
              break;

            case 't':
              /* Byte swap code halfwords.  */
              while (ptr + 1 < end)
                {
                  tmp = contents[ptr];
                  contents[ptr] = contents[ptr + 1];
                  contents[ptr + 1] = tmp;
                  ptr += 2;
                }
              break;

            case 'd':
              /* Leave data alone.  */
              break;
            }
          ptr = end;
        }
    }

  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}
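/* Illustrative sketch, not part of the original sources: the two words of
   the synthetic EXIDX_CANTUNWIND entry emitted above.  The first word is a
   prel31 (R_ARM_PREL31-style) offset from the entry's own address to the
   first address that cannot be unwound; the second word is the
   EXIDX_CANTUNWIND code, 0x1.  The helper name and the out parameters are
   assumptions made for this example.  */

static void
example_exidx_cantunwind_entry (bfd_vma text_end_addr, bfd_vma entry_addr,
                                unsigned long *word0, unsigned long *word1)
{
  /* Keep only the low 31 bits, as the code above does; consumers of the
     table sign-extend from bit 30 to recover a signed offset.  */
  *word0 = (text_end_addr - entry_addr) & 0x7fffffffUL;
  *word1 = 0x1;		/* EXIDX_CANTUNWIND.  */
}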
/* Display STT_ARM_TFUNC symbols as functions.  */

static void
elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
                             asymbol *asym)
{
  elf_symbol_type *elfsym = (elf_symbol_type *) asym;

  if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
    elfsym->symbol.flags |= BSF_FUNCTION;
}


/* Mangle thumb function symbols as we read them in.  */

static bfd_boolean
elf32_arm_swap_symbol_in (bfd * abfd,
                          const void *psrc,
                          const void *pshn,
                          Elf_Internal_Sym *dst)
{
  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
    return FALSE;

  /* New EABI objects mark thumb function symbols by setting the low bit of
     the address.  Turn these into STT_ARM_TFUNC.  */
  if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
      && (dst->st_value & 1))
    {
      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
      dst->st_value &= ~(bfd_vma) 1;
    }

  return TRUE;
}


/* Mangle thumb function symbols as we write them out.  */

static void
elf32_arm_swap_symbol_out (bfd *abfd,
                           const Elf_Internal_Sym *src,
                           void *cdst,
                           void *shndx)
{
  Elf_Internal_Sym newsym;

  /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
     of the address set, as per the new EABI.  We do this unconditionally
     because objcopy does not set the elf header flags until after
     it writes out the symbol table.  */
  if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
    {
      newsym = *src;
      newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
      if (newsym.st_shndx != SHN_UNDEF)
        {
          /* Do this only for defined symbols.  At link time, the static
             linker will simulate the work of the dynamic linker by resolving
             symbols and will carry over the thumbness of found symbols to
             the output symbol table.  It's not clear how it happens, but
             the thumbness of undefined symbols can well be different at
             runtime, and writing '1' for them would be confusing for users
             and possibly for the dynamic linker itself.  */
          newsym.st_value |= 1;
        }

      src = &newsym;
    }
  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}
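/* Illustrative sketch, not part of the original sources: the net effect of
   the two hooks above on a Thumb function symbol, using only the ELF
   accessor macros they already rely on.  The helper name and the use of a
   bare Elf_Internal_Sym are assumptions made for this example.  */

static void
example_thumb_symbol_round_trip (Elf_Internal_Sym *sym)
{
  /* Reading (elf32_arm_swap_symbol_in): an EABI Thumb function arrives as
     STT_FUNC with bit 0 of st_value set, and becomes STT_ARM_TFUNC with an
     even st_value.  */
  if (ELF_ST_TYPE (sym->st_info) == STT_FUNC && (sym->st_value & 1) != 0)
    {
      sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_ARM_TFUNC);
      sym->st_value &= ~(bfd_vma) 1;
    }

  /* Writing (elf32_arm_swap_symbol_out): the transformation is undone, and
     for defined symbols bit 0 is set again, so the file once more carries
     STT_FUNC with an odd value.  */
  if (ELF_ST_TYPE (sym->st_info) == STT_ARM_TFUNC)
    {
      sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
      if (sym->st_shndx != SHN_UNDEF)
        sym->st_value |= 1;
    }
}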
/* Add the PT_ARM_EXIDX program header.  */

static bfd_boolean
elf32_arm_modify_segment_map (bfd *abfd,
                              struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_segment_map *m;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    {
      /* If there is already a PT_ARM_EXIDX header, then we do not
         want to add another one.  This situation arises when running
         "strip"; the input binary already has the header.  */
      m = elf_tdata (abfd)->segment_map;
      while (m && m->p_type != PT_ARM_EXIDX)
        m = m->next;
      if (!m)
        {
          m = (struct elf_segment_map *)
              bfd_zalloc (abfd, sizeof (struct elf_segment_map));
          if (m == NULL)
            return FALSE;
          m->p_type = PT_ARM_EXIDX;
          m->count = 1;
          m->sections[0] = sec;

          m->next = elf_tdata (abfd)->segment_map;
          elf_tdata (abfd)->segment_map = m;
        }
    }

  return TRUE;
}


/* We may add a PT_ARM_EXIDX program header.  */

static int
elf32_arm_additional_program_headers (bfd *abfd,
                                      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    return 1;
  else
    return 0;
}


/* We have two function types: STT_FUNC and STT_ARM_TFUNC.  */

static bfd_boolean
elf32_arm_is_function_type (unsigned int type)
{
  return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
}


/* We use this to override swap_symbol_in and swap_symbol_out.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Hash table entry size.  */
  1,		/* Internal relocs per external reloc.  */
  32,		/* Arch size.  */
  2,		/* Log of file alignment.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,
  elf32_arm_swap_symbol_out,
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
#define ELF_ARCH			bfd_arch_arm
#define ELF_MACHINE_CODE		EM_ARM
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE			0x1000
#else
#define ELF_MAXPAGESIZE			0x8000
#endif
#define ELF_MINPAGESIZE			0x1000
#define ELF_COMMONPAGESIZE		0x1000

#define bfd_elf32_mkobject			elf32_arm_mkobject

#define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_free	elf32_arm_hash_table_free
#define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
#define bfd_elf32_find_nearest_line		elf32_arm_find_nearest_line
#define bfd_elf32_find_inliner_info		elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
#define bfd_elf32_bfd_final_link		elf32_arm_final_link

#define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
#define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
#define elf_backend_gc_sweep_hook		elf32_arm_gc_sweep_hook
#define elf_backend_check_relocs		elf32_arm_check_relocs
#define elf_backend_relocate_section		elf32_arm_relocate_section
#define elf_backend_write_section		elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
#define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
#define elf_backend_post_process_headers	elf32_arm_post_process_headers
#define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
#define elf_backend_object_p			elf32_arm_object_p
#define elf_backend_section_flags		elf32_arm_section_flags
#define elf_backend_fake_sections		elf32_arm_fake_sections
#define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
#define elf_backend_symbol_processing		elf32_arm_symbol_processing
#define elf_backend_size_info			elf32_arm_size_info
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
#define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
#define elf_backend_is_function_type		elf32_arm_is_function_type

#define elf_backend_can_refcount		1
#define elf_backend_can_gc_sections		1
#define elf_backend_plt_readonly		1
#define elf_backend_want_got_plt		1
#define elf_backend_want_plt_sym		0
#define elf_backend_may_use_rel_p		1
#define elf_backend_may_use_rela_p		0
#define elf_backend_default_use_rela_p		0

#define elf_backend_got_header_size		12

#undef  elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor		"aeabi"
#undef  elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section		".ARM.attributes"
#undef  elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
#undef  elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order

#include "elf32-target.h"
/* VxWorks Targets.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		bfd_elf32_littlearm_vxworks_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
#undef  TARGET_BIG_SYM
#define TARGET_BIG_SYM			bfd_elf32_bigarm_vxworks_vec
#undef  TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-vxworks"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for VxWorks.  */

static struct bfd_link_hash_table *
elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
	= (struct elf32_arm_link_hash_table *) ret;
      htab->use_rel = 0;
      htab->vxworks_p = 1;
    }
  return ret;
}

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}

#undef  elf32_bed
#define elf32_bed			elf32_arm_vxworks_bed

#undef  bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
#undef  elf_backend_add_symbol_hook
#define elf_backend_add_symbol_hook	elf_vxworks_add_symbol_hook
#undef  elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
#undef  elf_backend_emit_relocs
#define elf_backend_emit_relocs		elf_vxworks_emit_relocs

#undef  elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	0
#undef  elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	1
#undef  elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	1
#undef  elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	1
#undef  ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x1000

#include "elf32-target.h"
/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- the flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %B is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which, surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
	 ibfd, obfd,
	 (in_flags & EF_ARM_EABIMASK) >> 24,
	 (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
	     ibfd, obfd,
	     in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses VFP instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B uses FPA instructions, whereas %B does not"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses Maverick instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B does not use Maverick instructions, whereas %B does"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %B uses software FP, whereas %B uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %B uses hardware FP, whereas %B uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("Warning: %B supports interworking, whereas %B does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("Warning: %B does not support interworking, whereas %B does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
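/* Illustrative sketch, not part of the original sources: the EABI version
   number printed by the diagnostic above lives in the top byte of e_flags
   (EF_ARM_EABIMASK), so it is recovered with a mask and a shift by 24,
   exactly as in the error message arguments above.  The helper name is an
   assumption made for this example.  */

static unsigned int
example_eabi_version_number (flagword e_flags)
{
  /* An object built for EABI version 5 has 0x05000000 set in e_flags,
     so this returns 5.  */
  return (e_flags & EF_ARM_EABIMASK) >> 24;
}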
/* Symbian OS Targets.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		bfd_elf32_littlearm_symbian_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-symbian"
#undef  TARGET_BIG_SYM
#define TARGET_BIG_SYM			bfd_elf32_bigarm_symbian_vec
#undef  TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-symbian"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for Symbian OS.  */

static struct bfd_link_hash_table *
elf32_arm_symbian_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
	= (struct elf32_arm_link_hash_table *) ret;
      /* There is no PLT header for Symbian OS.  */
      htab->plt_header_size = 0;
      /* The PLT entries are each one instruction and one word.  */
      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
      htab->symbian_p = 1;
      /* Symbian uses armv5t or above, so use_blx is always true.  */
      htab->use_blx = 1;
      htab->root.is_relocatable_executable = 1;
    }
  return ret;
}

static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
  { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
  { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     applied to them.  */
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL,                                0, 0, 0,              0 }
};

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}

static bfd_boolean
elf32_arm_symbian_modify_segment_map (bfd *abfd,
				      struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  asection *dynsec;

  /* BPABI shared libraries and executables should have a PT_DYNAMIC
     segment.  However, because the .dynamic section is not marked
     with SEC_LOAD, the generic ELF code will not create such a
     segment.  */
  dynsec = bfd_get_section_by_name (abfd, ".dynamic");
  if (dynsec)
    {
      for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
	if (m->p_type == PT_DYNAMIC)
	  break;

      if (m == NULL)
	{
	  m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
	  m->next = elf_tdata (abfd)->segment_map;
	  elf_tdata (abfd)->segment_map = m;
	}
    }

  /* Also call the generic arm routine.  */
  return elf32_arm_modify_segment_map (abfd, info);
}

/* Return address for Ith PLT stub in section PLT, for relocation REL
   or (bfd_vma) -1 if it should not be included.  */

static bfd_vma
elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
			       const arelent *rel ATTRIBUTE_UNUSED)
{
  return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
}
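/* Illustrative sketch, not part of the original sources: the address
   computation performed by elf32_arm_symbian_plt_sym_val above.  Because
   Symbian OS has no PLT header (plt_header_size is set to 0 in the hash
   table setup above), entry I simply starts I whole entries past the start
   of the PLT section, where each entry is
   ARRAY_SIZE (elf32_arm_symbian_plt_entry) 32-bit words.  The helper name
   and its plain parameters are assumptions made for this example.  */

static bfd_vma
example_symbian_plt_entry_address (bfd_vma plt_vma, bfd_vma entry_size,
				   bfd_vma i)
{
  /* E.g. with a PLT at 0x8000 and 16-byte entries, entry 2 is at 0x8020.  */
  return plt_vma + entry_size * i;
}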
#undef  elf32_bed
#define elf32_bed			elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef  ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

#undef  elf_backend_add_symbol_hook
#undef  elf_backend_emit_relocs

#undef  bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_symbian_link_hash_table_create
#undef  elf_backend_special_sections
#define elf_backend_special_sections		elf32_arm_symbian_special_sections
#undef  elf_backend_begin_write_processing
#define elf_backend_begin_write_processing	elf32_arm_symbian_begin_write_processing
#undef  elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing

#undef  elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef  elf_backend_got_header_size
#define elf_backend_got_header_size		0

/* Similarly, there is no .got.plt section.  */
#undef  elf_backend_want_got_plt
#define elf_backend_want_got_plt		0

#undef  elf_backend_plt_sym_val
#define elf_backend_plt_sym_val			elf32_arm_symbian_plt_sym_val

#undef  elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p		1
#undef  elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p		0
#undef  elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p		0
#undef  elf_backend_want_plt_sym
#define elf_backend_want_plt_sym		0
#undef  ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE				0x8000

#include "elf32-target.h"