1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
26 #include "libiberty.h"
29 #include "elf-vxworks.h"
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
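/* Usage sketch added for illustration (not part of the original sources):
   the three helpers above are all keyed off the hash table's use_rel flag,
   so REL- and RELA-style targets share one code path.  Emitting a single
   dynamic relocation might look roughly like this; the helper name is
   hypothetical and the hash-table type is declared later in this file.  */
#if 0
static void
example_emit_dyn_reloc (struct elf32_arm_link_hash_table *htab,
                        bfd *output_bfd,
                        asection *srel,
                        Elf_Internal_Rela *rel)
{
  /* One fixed-size slot per relocation; REL slots are smaller because
     they carry no explicit addend.  */
  bfd_byte *loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);

  /* Swap the entry out in the flavour that matches the section.  */
  SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
}
#endif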
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info,
66 asection *sec,
67 bfd_byte *contents);
69 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
70 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
71 in that slot. */
73 static reloc_howto_type elf32_arm_howto_table_1[] =
76 HOWTO (R_ARM_NONE, /* type */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
80 FALSE, /* pc_relative */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
88 FALSE), /* pcrel_offset */
90 HOWTO (R_ARM_PC24, /* type */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
94 TRUE, /* pc_relative */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
109 FALSE, /* pc_relative */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
124 TRUE, /* pc_relative */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
134 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
139 TRUE, /* pc_relative */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
154 FALSE, /* pc_relative */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
169 FALSE, /* pc_relative */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
179 HOWTO (R_ARM_THM_ABS5, /* type */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
183 FALSE, /* pc_relative */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
194 HOWTO (R_ARM_ABS8, /* type */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
198 FALSE, /* pc_relative */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
208 HOWTO (R_ARM_SBREL32, /* type */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
212 FALSE, /* pc_relative */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
222 HOWTO (R_ARM_THM_CALL, /* type */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
226 TRUE, /* pc_relative */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
236 HOWTO (R_ARM_THM_PC8, /* type */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
240 TRUE, /* pc_relative */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
250 HOWTO (R_ARM_BREL_ADJ, /* type */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
254 FALSE, /* pc_relative */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
264 HOWTO (R_ARM_SWI24, /* type */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
268 FALSE, /* pc_relative */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
278 HOWTO (R_ARM_THM_SWI8, /* type */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
282 FALSE, /* pc_relative */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
286 "R_ARM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
297 TRUE, /* pc_relative */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
312 TRUE, /* pc_relative */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
322 /* Dynamic TLS relocations. */
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
328 FALSE, /* pc_relative */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
342 FALSE, /* pc_relative */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
356 FALSE, /* pc_relative */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
366 /* Relocs used in ARM Linux */
368 HOWTO (R_ARM_COPY, /* type */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
372 FALSE, /* pc_relative */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
382 HOWTO (R_ARM_GLOB_DAT, /* type */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
386 FALSE, /* pc_relative */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
400 FALSE, /* pc_relative */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
410 HOWTO (R_ARM_RELATIVE, /* type */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
414 FALSE, /* pc_relative */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
424 HOWTO (R_ARM_GOTOFF32, /* type */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
428 FALSE, /* pc_relative */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
438 HOWTO (R_ARM_GOTPC, /* type */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
442 TRUE, /* pc_relative */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
452 HOWTO (R_ARM_GOT32, /* type */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
456 FALSE, /* pc_relative */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
466 HOWTO (R_ARM_PLT32, /* type */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
470 TRUE, /* pc_relative */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
480 HOWTO (R_ARM_CALL, /* type */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
484 TRUE, /* pc_relative */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
494 HOWTO (R_ARM_JUMP24, /* type */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
498 TRUE, /* pc_relative */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
508 HOWTO (R_ARM_THM_JUMP24, /* type */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
512 TRUE, /* pc_relative */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
522 HOWTO (R_ARM_BASE_ABS, /* type */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
526 FALSE, /* pc_relative */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
540 TRUE, /* pc_relative */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
554 TRUE, /* pc_relative */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
568 TRUE, /* pc_relative */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
582 FALSE, /* pc_relative */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
596 FALSE, /* pc_relative */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
610 FALSE, /* pc_relative */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
620 HOWTO (R_ARM_TARGET1, /* type */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
624 FALSE, /* pc_relative */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
634 HOWTO (R_ARM_ROSEGREL32, /* type */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
638 FALSE, /* pc_relative */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
648 HOWTO (R_ARM_V4BX, /* type */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
652 FALSE, /* pc_relative */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
662 HOWTO (R_ARM_TARGET2, /* type */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
666 FALSE, /* pc_relative */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
676 HOWTO (R_ARM_PREL31, /* type */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
680 TRUE, /* pc_relative */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
694 FALSE, /* pc_relative */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
704 HOWTO (R_ARM_MOVT_ABS, /* type */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
708 FALSE, /* pc_relative */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
722 TRUE, /* pc_relative */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
732 HOWTO (R_ARM_MOVT_PREL, /* type */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
736 TRUE, /* pc_relative */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
750 FALSE, /* pc_relative */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
764 FALSE, /* pc_relative */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
778 TRUE, /* pc_relative */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
792 TRUE, /* pc_relative */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
802 HOWTO (R_ARM_THM_JUMP19, /* type */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
806 TRUE, /* pc_relative */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
816 HOWTO (R_ARM_THM_JUMP6, /* type */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
820 TRUE, /* pc_relative */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
830 /* These are declared as 13-bit signed relocations because we can
831 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
832 versa. */
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
837 TRUE, /* pc_relative */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
847 HOWTO (R_ARM_THM_PC12, /* type */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
851 TRUE, /* pc_relative */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
861 HOWTO (R_ARM_ABS32_NOI, /* type */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
865 FALSE, /* pc_relative */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
875 HOWTO (R_ARM_REL32_NOI, /* type */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
879 TRUE, /* pc_relative */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
889 /* Group relocations. */
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
895 TRUE, /* pc_relative */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
909 TRUE, /* pc_relative */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
923 TRUE, /* pc_relative */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
937 TRUE, /* pc_relative */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
951 TRUE, /* pc_relative */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
965 TRUE, /* pc_relative */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
979 TRUE, /* pc_relative */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
993 TRUE, /* pc_relative */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1007 TRUE, /* pc_relative */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1021 TRUE, /* pc_relative */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1035 TRUE, /* pc_relative */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1049 TRUE, /* pc_relative */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1063 TRUE, /* pc_relative */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1077 TRUE, /* pc_relative */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1091 TRUE, /* pc_relative */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1105 TRUE, /* pc_relative */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1119 TRUE, /* pc_relative */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1133 TRUE, /* pc_relative */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1147 TRUE, /* pc_relative */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1161 TRUE, /* pc_relative */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1175 TRUE, /* pc_relative */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1189 TRUE, /* pc_relative */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1203 TRUE, /* pc_relative */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1217 TRUE, /* pc_relative */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1231 TRUE, /* pc_relative */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1245 TRUE, /* pc_relative */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1259 TRUE, /* pc_relative */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1269 /* End of group relocations. */
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1275 FALSE, /* pc_relative */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1289 FALSE, /* pc_relative */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1303 FALSE, /* pc_relative */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1317 FALSE, /* pc_relative */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1331 FALSE, /* pc_relative */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1345 FALSE, /* pc_relative */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1355 EMPTY_HOWTO (90), /* Unallocated. */
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 FALSE, /* pc_relative */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 FALSE, /* pc_relative */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1392 TRUE, /* pc_relative */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1406 FALSE, /* pc_relative */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1420 FALSE, /* pc_relative */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1437 FALSE, /* pc_relative */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1445 FALSE), /* pcrel_offset */
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1452 FALSE, /* pc_relative */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1460 FALSE), /* pcrel_offset */
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1466 TRUE, /* pc_relative */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1480 TRUE, /* pc_relative */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1495 FALSE, /* pc_relative */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1509 FALSE, /* pc_relative */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1523 FALSE, /* pc_relative */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1537 FALSE, /* pc_relative */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1551 FALSE, /* pc_relative */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1565 FALSE, /* pc_relative */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1579 FALSE, /* pc_relative */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1593 FALSE, /* pc_relative */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1604 /* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1608 249-255 extended, currently unused, relocations: */
1610 static reloc_howto_type elf32_arm_howto_table_2[4] =
1612 HOWTO (R_ARM_RREL32, /* type */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1616 FALSE, /* pc_relative */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1624 FALSE), /* pcrel_offset */
1626 HOWTO (R_ARM_RABS32, /* type */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1630 FALSE, /* pc_relative */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1638 FALSE), /* pcrel_offset */
1640 HOWTO (R_ARM_RPC24, /* type */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1644 FALSE, /* pc_relative */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1652 FALSE), /* pcrel_offset */
1654 HOWTO (R_ARM_RBASE, /* type */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1658 FALSE, /* pc_relative */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
1666 FALSE) /* pcrel_offset */
1669 static reloc_howto_type *
1670 elf32_arm_howto_from_type (unsigned int r_type)
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1677 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1683 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1684 Elf_Internal_Rela * elf_reloc)
1686 unsigned int r_type;
1688 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1689 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1692 struct elf32_arm_reloc_map
1694 bfd_reloc_code_real_type bfd_reloc_val;
1695 unsigned char elf_reloc_val;
1698 /* All entries in this list must also be present in elf32_arm_howto_table. */
1699 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1725 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1726 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1727 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1728 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1729 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1730 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1731 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1732 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1733 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1734 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1735 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1736 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1737 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1738 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1739 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1740 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1741 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1742 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1743 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1744 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1745 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1746 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1747 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1748 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1749 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1750 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1751 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1752 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1753 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1754 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1755 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1756 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1757 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1758 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1759 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1760 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1761 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1762 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1763 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1764 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1765 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1766 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1767 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1768 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1769 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1770 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1771 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1772 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1773 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1774 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1775 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1776 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1777 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1778 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1779 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1782 static reloc_howto_type *
1783 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1784 bfd_reloc_code_real_type code)
1788 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1789 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1790 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1795 static reloc_howto_type *
1796 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1801 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1802 if (elf32_arm_howto_table_1[i].name != NULL
1803 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1804 return &elf32_arm_howto_table_1[i];
1806 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1807 if (elf32_arm_howto_table_2[i].name != NULL
1808 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1809 return &elf32_arm_howto_table_2[i];
1814 /* Support for core dump NOTE sections. */
1817 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1822 switch (note->descsz)
1827 case 148: /* Linux/ARM 32-bit. */
1829 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1832 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
1841 /* Make a ".reg/999" section. */
1842 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1843 size, note->descpos + offset);
1847 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1849 switch (note->descsz)
1854 case 124: /* Linux/ARM elf_prpsinfo. */
1855 elf_tdata (abfd)->core_program
1856 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1857 elf_tdata (abfd)->core_command
1858 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1861 /* Note that for some reason, a spurious space is tacked
1862 onto the end of the args in some (at least one anyway)
1863 implementations, so strip it off if it exists. */
1865 char *command = elf_tdata (abfd)->core_command;
1866 int n = strlen (command);
1868 if (0 < n && command[n - 1] == ' ')
1869 command[n - 1] = '\0';
1875 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1876 #define TARGET_LITTLE_NAME "elf32-littlearm"
1877 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1878 #define TARGET_BIG_NAME "elf32-bigarm"
1880 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1881 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1883 typedef unsigned long int insn32;
1884 typedef unsigned short int insn16;
1886 /* In lieu of proper flags, assume all EABIv4 or later objects are
1887 interworkable. */
1888 #define INTERWORK_FLAG(abfd) \
1889 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1890 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1891 || ((abfd)->flags & BFD_LINKER_CREATED))
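/* Illustrative sketch (hypothetical helper, not part of the original
   sources): INTERWORK_FLAG is typically consulted when combining input
   objects, for instance to detect a mismatch between an object built for
   ARM/Thumb interworking and one that was not.  */
#if 0
static bfd_boolean
example_interwork_compatible (bfd *ibfd, bfd *obfd)
{
  /* A mismatch would normally be reported to the user; here we simply
     signal it to the caller.  */
  if (INTERWORK_FLAG (ibfd) != INTERWORK_FLAG (obfd))
    return FALSE;
  return TRUE;
}
#endif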
1893 /* The linker script knows the section names for placement.
1894 The entry_names are used to do simple name mangling on the stubs.
1895 Given a function name, and its type, the stub can be found. The
1896 name can be changed. The only requirement is the %s be present. */
1897 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1898 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1900 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1901 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1903 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1904 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1906 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1907 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1909 #define STUB_ENTRY_NAME "__%s_veneer"
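/* Illustrative sketch (hypothetical helper): each template above is a
   printf-style pattern whose "%s" (or "%x"/"%d") is filled in with the
   symbol or index being veneered, as the comment before these definitions
   notes.  Building a veneer symbol name for FUNC could look like this.  */
#if 0
static char *
example_make_veneer_name (const char *func)
{
  /* Room for the template, the expanded function name and the NUL.  */
  char *name = (char *) bfd_malloc (strlen (STUB_ENTRY_NAME)
                                    + strlen (func) + 1);

  if (name != NULL)
    sprintf (name, STUB_ENTRY_NAME, func);
  return name;
}
#endif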
1911 /* The name of the dynamic interpreter. This is put in the .interp
1912 section. */
1913 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1915 #ifdef FOUR_WORD_PLT
1917 /* The first entry in a procedure linkage table looks like
1918 this. It is set up so that any shared library function that is
1919 called before the relocation has been set up calls the dynamic
1920 linker first. */
1921 static const bfd_vma elf32_arm_plt0_entry [] =
1923 0xe52de004, /* str lr, [sp, #-4]! */
1924 0xe59fe010, /* ldr lr, [pc, #16] */
1925 0xe08fe00e, /* add lr, pc, lr */
1926 0xe5bef008, /* ldr pc, [lr, #8]! */
1929 /* Subsequent entries in a procedure linkage table look like
1930 this. */
1931 static const bfd_vma elf32_arm_plt_entry [] =
1933 0xe28fc600, /* add ip, pc, #NN */
1934 0xe28cca00, /* add ip, ip, #NN */
1935 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1936 0x00000000, /* unused */
1941 /* The first entry in a procedure linkage table looks like
1942 this. It is set up so that any shared library function that is
1943 called before the relocation has been set up calls the dynamic
1944 linker first. */
1945 static const bfd_vma elf32_arm_plt0_entry [] =
1947 0xe52de004, /* str lr, [sp, #-4]! */
1948 0xe59fe004, /* ldr lr, [pc, #4] */
1949 0xe08fe00e, /* add lr, pc, lr */
1950 0xe5bef008, /* ldr pc, [lr, #8]! */
1951 0x00000000, /* &GOT[0] - . */
1954 /* Subsequent entries in a procedure linkage table look like
1955 this. */
1956 static const bfd_vma elf32_arm_plt_entry [] =
1958 0xe28fc600, /* add ip, pc, #0xNN00000 */
1959 0xe28cca00, /* add ip, ip, #0xNN000 */
1960 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
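/* Illustrative sketch: when a PLT entry is written out, the three "NN"
   fields in the instructions above are filled in from the displacement
   between the PLT entry and its GOT slot: an 8-bit rotated immediate for
   each ADD and a 12-bit offset for the LDR.  Roughly (hypothetical helper;
   real emission also byte-swaps each word into the output section):  */
#if 0
static void
example_fill_plt_entry (bfd_vma insns[3], bfd_vma got_displacement)
{
  insns[0] = elf32_arm_plt_entry[0] | ((got_displacement & 0x0ff00000) >> 20);
  insns[1] = elf32_arm_plt_entry[1] | ((got_displacement & 0x000ff000) >> 12);
  insns[2] = elf32_arm_plt_entry[2] |  (got_displacement & 0x00000fff);
}
#endif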
1965 /* The format of the first entry in the procedure linkage table
1966 for a VxWorks executable. */
1967 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1969 0xe52dc008, /* str ip,[sp,#-8]! */
1970 0xe59fc000, /* ldr ip,[pc] */
1971 0xe59cf008, /* ldr pc,[ip,#8] */
1972 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1975 /* The format of subsequent entries in a VxWorks executable. */
1976 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1978 0xe59fc000, /* ldr ip,[pc] */
1979 0xe59cf000, /* ldr pc,[ip] */
1980 0x00000000, /* .long @got */
1981 0xe59fc000, /* ldr ip,[pc] */
1982 0xea000000, /* b _PLT */
1983 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1986 /* The format of entries in a VxWorks shared library. */
1987 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1989 0xe59fc000, /* ldr ip,[pc] */
1990 0xe79cf009, /* ldr pc,[ip,r9] */
1991 0x00000000, /* .long @got */
1992 0xe59fc000, /* ldr ip,[pc] */
1993 0xe599f008, /* ldr pc,[r9,#8] */
1994 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1997 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1998 #define PLT_THUMB_STUB_SIZE 4
1999 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2000 {
2001 0x4778, /* bx pc */
2002 0x46c0 /* nop */
2003 };
2005 /* The entries in a PLT when using a DLL-based target with multiple
2006 address spaces. */
2007 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2009 0xe51ff004, /* ldr pc, [pc, #-4] */
2010 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2013 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2014 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2015 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2016 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2017 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2018 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
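/* Editorial note: worked example of the limits above.  For an ARM B/BL the
   24-bit signed immediate is shifted left by two and the PC reads 8 bytes
   ahead, so ARM_MAX_FWD_BRANCH_OFFSET evaluates to
   ((1 << 23) - 1) * 4 + 8 = 0x2000004 and ARM_MAX_BWD_BRANCH_OFFSET to
   -(1 << 23) * 4 + 8 = -0x1fffff8; the Thumb limits use +4 because Thumb
   branches read the PC 4 bytes ahead.  The sketch below is guarded by
   "#if 0" so it is never compiled; the helper name is invented purely to
   illustrate how the limits are applied (compare arm_type_of_stub).  */
#if 0
static bfd_boolean
thumb2_branch_in_range (bfd_signed_vma branch_offset)
{
  /* A Thumb-2 B.W/BL reaches its target iff the offset lies within the
     THM2_* limits defined above.  */
  return (branch_offset <= THM2_MAX_FWD_BRANCH_OFFSET
          && branch_offset >= THM2_MAX_BWD_BRANCH_OFFSET);
}
#endif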
2028 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2029 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2030 is inserted in arm_build_one_stub(). */
2031 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2032 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2033 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2034 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2035 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2036 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
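/* Editorial note (illustrative): each macro above builds one template entry
   of the form {value, instruction type, relocation type, addend-or-flag}.
   For example, ARM_INSN(0xe51ff004) describes a 4-byte ARM instruction that
   needs no relocation, while DATA_WORD(0, R_ARM_ABS32, 0) describes a
   literal word that is given an R_ARM_ABS32 relocation when the stub is
   built (see arm_build_one_stub).  */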
2041 enum stub_insn_type type;
2042 unsigned int r_type;
2046 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2047 to reach the stub if necessary. */
2048 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2050 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2051 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2054 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not available. */
2056 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2058 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2059 ARM_INSN(0xe12fff1c), /* bx ip */
2060 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2063 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2064 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2066 THUMB16_INSN(0xb401), /* push {r0} */
2067 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2068 THUMB16_INSN(0x4684), /* mov ip, r0 */
2069 THUMB16_INSN(0xbc01), /* pop {r0} */
2070 THUMB16_INSN(0x4760), /* bx ip */
2071 THUMB16_INSN(0xbf00), /* nop */
2072 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2075 /* V4T Thumb -> Thumb long branch stub. Using the stack is not allowed. */
2077 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2079 THUMB16_INSN(0x4778), /* bx pc */
2080 THUMB16_INSN(0x46c0), /* nop */
2081 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2082 ARM_INSN(0xe12fff1c), /* bx ip */
2083 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2086 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not available. */
2088 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2090 THUMB16_INSN(0x4778), /* bx pc */
2091 THUMB16_INSN(0x46c0), /* nop */
2092 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2093 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2096 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2097 one, when the destination is close enough. */
2098 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2100 THUMB16_INSN(0x4778), /* bx pc */
2101 THUMB16_INSN(0x46c0), /* nop */
2102 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2105 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2106 blx to reach the stub if necessary. */
2107 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2109 ARM_INSN(0xe59fc000), /* ldr ip, [pc] */
2110 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2111 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2114 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2115 blx to reach the stub if necessary. We can not add into pc;
2116 it is not guaranteed to mode switch (different in ARMv6 and ARMv7). */
2118 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2120 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2121 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2122 ARM_INSN(0xe12fff1c), /* bx ip */
2123 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2126 /* V4T ARM -> Thumb long branch stub, PIC. */
2127 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2129 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2130 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2131 ARM_INSN(0xe12fff1c), /* bx ip */
2132 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2135 /* V4T Thumb -> ARM long branch stub, PIC. */
2136 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2138 THUMB16_INSN(0x4778), /* bx pc */
2139 THUMB16_INSN(0x46c0), /* nop */
2140 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2141 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2142 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2145 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile architectures. */
2147 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2149 THUMB16_INSN(0xb401), /* push {r0} */
2150 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2151 THUMB16_INSN(0x46fc), /* mov ip, pc */
2152 THUMB16_INSN(0x4484), /* add ip, r0 */
2153 THUMB16_INSN(0xbc01), /* pop {r0} */
2154 THUMB16_INSN(0x4760), /* bx ip */
2155 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2158 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not allowed. */
2160 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2162 THUMB16_INSN(0x4778), /* bx pc */
2163 THUMB16_INSN(0x46c0), /* nop */
2164 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2165 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2166 ARM_INSN(0xe12fff1c), /* bx ip */
2167 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2170 /* Cortex-A8 erratum-workaround stubs. */
2172 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2173 can't use a conditional branch to reach this stub). */
2175 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2177 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2178 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2179 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2182 /* Stub used for b.w and bl.w instructions. */
2184 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2186 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2189 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2191 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2194 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2195 instruction (which switches to ARM mode) to point to this stub. Jump to the
2196 real destination using an ARM-mode branch. */
2198 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2200 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2203 /* Section name for stubs is the associated section name plus this string. */
2205 #define STUB_SUFFIX ".stub"
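/* Editorial note (illustrative): a stub section takes the name of the
   section its stubs are attached to plus STUB_SUFFIX, e.g. ".text.stub"
   (see elf32_arm_create_or_find_stub_sec below).  */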
2207 /* One entry per long/short branch stub defined above. */
2208 #define DEF_STUBS \
2209 DEF_STUB(long_branch_any_any) \
2210 DEF_STUB(long_branch_v4t_arm_thumb) \
2211 DEF_STUB(long_branch_thumb_only) \
2212 DEF_STUB(long_branch_v4t_thumb_thumb) \
2213 DEF_STUB(long_branch_v4t_thumb_arm) \
2214 DEF_STUB(short_branch_v4t_thumb_arm) \
2215 DEF_STUB(long_branch_any_arm_pic) \
2216 DEF_STUB(long_branch_any_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2220 DEF_STUB(long_branch_thumb_only_pic) \
2221 DEF_STUB(a8_veneer_b_cond) \
2222 DEF_STUB(a8_veneer_b) \
2223 DEF_STUB(a8_veneer_bl) \
2224 DEF_STUB(a8_veneer_blx)
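/* Editorial note: DEF_STUBS is expanded twice with different definitions of
   DEF_STUB -- once to generate the enumerators of enum elf32_arm_stub_type
   and once to generate the matching template/size entries of
   stub_definitions[] -- so the enum and the table stay in sync by
   construction.  */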
2226 #define DEF_STUB(x) arm_stub_##x,
2227 enum elf32_arm_stub_type {
2230 /* Note the first a8_veneer type */
2231 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2237 const insn_sequence* template_sequence;
2241 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2242 static const stub_def stub_definitions[] = {
2247 struct elf32_arm_stub_hash_entry
2249 /* Base hash table entry structure. */
2250 struct bfd_hash_entry root;
2252 /* The stub section. */
2255 /* Offset within stub_sec of the beginning of this stub. */
2256 bfd_vma stub_offset;
2258 /* Given the symbol's value and its section we can determine its final
2259 value when building the stubs (so the stub knows where to jump). */
2260 bfd_vma target_value;
2261 asection *target_section;
2263 /* Offset to apply to relocation referencing target_value. */
2264 bfd_vma target_addend;
2266 /* The instruction which caused this stub to be generated (only valid for
2267 Cortex-A8 erratum workaround stubs at present). */
2268 unsigned long orig_insn;
2270 /* The stub type. */
2271 enum elf32_arm_stub_type stub_type;
2272 /* Its encoding size in bytes. */
2275 const insn_sequence *stub_template;
2276 /* The size of the template (number of entries). */
2277 int stub_template_size;
2279 /* The symbol table entry, if any, that this was derived from. */
2280 struct elf32_arm_link_hash_entry *h;
2282 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2283 unsigned char st_type;
2285 /* Where this stub is being called from, or, in the case of combined
2286 stub sections, the first input section in the group. */
2289 /* The name for the local symbol at the start of this stub. The
2290 stub name in the hash table has to be unique; this does not, so
2291 it can be friendlier. */
2295 /* Used to build a map of a section. This is required for mixed-endian code/data. */
2298 typedef struct elf32_elf_section_map
2303 elf32_arm_section_map;
2305 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2309 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2310 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2311 VFP11_ERRATUM_ARM_VENEER,
2312 VFP11_ERRATUM_THUMB_VENEER
2314 elf32_vfp11_erratum_type;
2316 typedef struct elf32_vfp11_erratum_list
2318 struct elf32_vfp11_erratum_list *next;
2324 struct elf32_vfp11_erratum_list *veneer;
2325 unsigned int vfp_insn;
2329 struct elf32_vfp11_erratum_list *branch;
2333 elf32_vfp11_erratum_type type;
2335 elf32_vfp11_erratum_list;
2340 INSERT_EXIDX_CANTUNWIND_AT_END
2342 arm_unwind_edit_type;
2344 /* A (sorted) list of edits to apply to an unwind table. */
2345 typedef struct arm_unwind_table_edit
2347 arm_unwind_edit_type type;
2348 /* Note: we sometimes want to insert an unwind entry corresponding to a
2349 section different from the one we're currently writing out, so record the
2350 (text) section this edit relates to here. */
2351 asection *linked_section;
2353 struct arm_unwind_table_edit *next;
2355 arm_unwind_table_edit;
2357 typedef struct _arm_elf_section_data
2359 /* Information about mapping symbols. */
2360 struct bfd_elf_section_data elf;
2361 unsigned int mapcount;
2362 unsigned int mapsize;
2363 elf32_arm_section_map *map;
2364 /* Information about CPU errata. */
2365 unsigned int erratumcount;
2366 elf32_vfp11_erratum_list *erratumlist;
2367 /* Information about unwind tables. */
2370 /* Unwind info attached to a text section. */
2373 asection *arm_exidx_sec;
2376 /* Unwind info attached to an .ARM.exidx section. */
2379 arm_unwind_table_edit *unwind_edit_list;
2380 arm_unwind_table_edit *unwind_edit_tail;
2384 _arm_elf_section_data;
2386 #define elf32_arm_section_data(sec) \
2387 ((_arm_elf_section_data *) elf_section_data (sec))
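/* Editorial note (illustrative): elf32_arm_section_data (sec)->mapcount is
   the number of mapping-symbol entries recorded for SEC, and ->erratumlist
   the VFP11 erratum records attached to it.  */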
2389 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2390 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2391 so may be created multiple times: we use an array of these entries whilst
2392 relaxing which we can refresh easily, then create stubs for each potentially
2393 erratum-triggering instruction once we've settled on a solution. */
2395 struct a8_erratum_fix {
2400 unsigned long orig_insn;
2402 enum elf32_arm_stub_type stub_type;
2406 /* A table of relocs applied to branches which might trigger the Cortex-A8 erratum. */
2409 struct a8_erratum_reloc {
2411 bfd_vma destination;
2412 struct elf32_arm_link_hash_entry *hash;
2413 const char *sym_name;
2414 unsigned int r_type;
2415 unsigned char st_type;
2416 bfd_boolean non_a8_stub;
2419 /* The size of the thread control block. */
2422 struct elf_arm_obj_tdata
2424 struct elf_obj_tdata root;
2426 /* tls_type for each local got entry. */
2427 char *local_got_tls_type;
2429 /* Zero to warn when linking objects with incompatible enum sizes. */
2430 int no_enum_size_warning;
2432 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2433 int no_wchar_size_warning;
2436 #define elf_arm_tdata(bfd) \
2437 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2439 #define elf32_arm_local_got_tls_type(bfd) \
2440 (elf_arm_tdata (bfd)->local_got_tls_type)
2442 #define is_arm_elf(bfd) \
2443 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2444 && elf_tdata (bfd) != NULL \
2445 && elf_object_id (bfd) == ARM_ELF_DATA)
2448 elf32_arm_mkobject (bfd *abfd)
2450 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2454 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2456 /* Arm ELF linker hash entry. */
2457 struct elf32_arm_link_hash_entry
2459 struct elf_link_hash_entry root;
2461 /* Track dynamic relocs copied for this symbol. */
2462 struct elf_dyn_relocs *dyn_relocs;
2464 /* We reference count Thumb references to a PLT entry separately,
2465 so that we can emit the Thumb trampoline only if needed. */
2466 bfd_signed_vma plt_thumb_refcount;
2468 /* Some references from Thumb code may be eliminated by BL->BLX
2469 conversion, so record them separately. */
2470 bfd_signed_vma plt_maybe_thumb_refcount;
2472 /* Since PLT entries have variable size if the Thumb prologue is
2473 used, we need to record the index into .got.plt instead of
2474 recomputing it from the PLT offset. */
2475 bfd_signed_vma plt_got_offset;
2477 #define GOT_UNKNOWN 0
2478 #define GOT_NORMAL 1
2479 #define GOT_TLS_GD 2
2480 #define GOT_TLS_IE 4
2481 unsigned char tls_type;
2483 /* The symbol marking the real symbol location for exported thumb
2484 symbols with Arm stubs. */
2485 struct elf_link_hash_entry *export_glue;
2487 /* A pointer to the most recently used stub hash entry against this symbol. */
2489 struct elf32_arm_stub_hash_entry *stub_cache;
2492 /* Traverse an arm ELF linker hash table. */
2493 #define elf32_arm_link_hash_traverse(table, func, info) \
2494 (elf_link_hash_traverse \
2496 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2499 /* Get the ARM elf linker hash table from a link_info structure. */
2500 #define elf32_arm_hash_table(info) \
2501 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2502 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
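/* Editorial note: the accessor above yields NULL when INFO's hash table was
   created by a different backend, so callers are expected to check the
   result (as arm_type_of_stub and others below do).  */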
2504 #define arm_stub_hash_lookup(table, string, create, copy) \
2505 ((struct elf32_arm_stub_hash_entry *) \
2506 bfd_hash_lookup ((table), (string), (create), (copy)))
2508 /* Array to keep track of which stub sections have been created, and
2509 information on stub grouping. */
2512 /* This is the section to which stubs in the group will be attached. */
2515 /* The stub section. */
2519 /* ARM ELF linker hash table. */
2520 struct elf32_arm_link_hash_table
2522 /* The main hash table. */
2523 struct elf_link_hash_table root;
2525 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2526 bfd_size_type thumb_glue_size;
2528 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2529 bfd_size_type arm_glue_size;
2531 /* The size in bytes of the section containing the ARMv4 BX veneers. */
2532 bfd_size_type bx_glue_size;
2534 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2535 veneer has been populated. */
2536 bfd_vma bx_glue_offset[15];
2538 /* The size in bytes of the section containing glue for VFP11 erratum veneers. */
2540 bfd_size_type vfp11_erratum_glue_size;
2542 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2543 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2544 elf32_arm_write_section(). */
2545 struct a8_erratum_fix *a8_erratum_fixes;
2546 unsigned int num_a8_erratum_fixes;
2548 /* An arbitrary input BFD chosen to hold the glue sections. */
2549 bfd * bfd_of_glue_owner;
2551 /* Nonzero to output a BE8 image. */
2554 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2555 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2558 /* The relocation to use for R_ARM_TARGET2 relocations. */
2561 /* 0 = Ignore R_ARM_V4BX.
2562 1 = Convert BX to MOV PC.
2563 2 = Generate v4 interworking stubs. */
2566 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2569 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2572 /* What sort of code sequences we should look for which may trigger the
2573 VFP11 denorm erratum. */
2574 bfd_arm_vfp11_fix vfp11_fix;
2576 /* Global counter for the number of fixes we have emitted. */
2577 int num_vfp11_fixes;
2579 /* Nonzero to force PIC branch veneers. */
2582 /* The number of bytes in the initial entry in the PLT. */
2583 bfd_size_type plt_header_size;
2585 /* The number of bytes in the subsequent PLT entries. */
2586 bfd_size_type plt_entry_size;
2588 /* True if the target system is VxWorks. */
2591 /* True if the target system is Symbian OS. */
2594 /* True if the target uses REL relocations. */
2597 /* Short-cuts to get to dynamic linker sections. */
2601 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2604 /* Data for R_ARM_TLS_LDM32 relocations. */
2607 bfd_signed_vma refcount;
2611 /* Small local sym cache. */
2612 struct sym_cache sym_cache;
2614 /* For convenience in allocate_dynrelocs. */
2617 /* The stub hash table. */
2618 struct bfd_hash_table stub_hash_table;
2620 /* Linker stub bfd. */
2623 /* Linker call-backs. */
2624 asection * (*add_stub_section) (const char *, asection *);
2625 void (*layout_sections_again) (void);
2627 /* Array to keep track of which stub sections have been created, and
2628 information on stub grouping. */
2629 struct map_stub *stub_group;
2631 /* Number of elements in stub_group. */
2634 /* Assorted information used by elf32_arm_size_stubs. */
2635 unsigned int bfd_count;
2637 asection **input_list;
2640 /* Create an entry in an ARM ELF linker hash table. */
2642 static struct bfd_hash_entry *
2643 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2644 struct bfd_hash_table * table,
2645 const char * string)
2647 struct elf32_arm_link_hash_entry * ret =
2648 (struct elf32_arm_link_hash_entry *) entry;
2650 /* Allocate the structure if it has not already been allocated by a subclass. */
2653 ret = (struct elf32_arm_link_hash_entry *)
2654 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2656 return (struct bfd_hash_entry *) ret;
2658 /* Call the allocation method of the superclass. */
2659 ret = ((struct elf32_arm_link_hash_entry *)
2660 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2664 ret->dyn_relocs = NULL;
2665 ret->tls_type = GOT_UNKNOWN;
2666 ret->plt_thumb_refcount = 0;
2667 ret->plt_maybe_thumb_refcount = 0;
2668 ret->plt_got_offset = -1;
2669 ret->export_glue = NULL;
2671 ret->stub_cache = NULL;
2674 return (struct bfd_hash_entry *) ret;
2677 /* Initialize an entry in the stub hash table. */
2679 static struct bfd_hash_entry *
2680 stub_hash_newfunc (struct bfd_hash_entry *entry,
2681 struct bfd_hash_table *table,
2684 /* Allocate the structure if it has not already been allocated by a subclass. */
2688 entry = (struct bfd_hash_entry *)
2689 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2694 /* Call the allocation method of the superclass. */
2695 entry = bfd_hash_newfunc (entry, table, string);
2698 struct elf32_arm_stub_hash_entry *eh;
2700 /* Initialize the local fields. */
2701 eh = (struct elf32_arm_stub_hash_entry *) entry;
2702 eh->stub_sec = NULL;
2703 eh->stub_offset = 0;
2704 eh->target_value = 0;
2705 eh->target_section = NULL;
2706 eh->target_addend = 0;
2708 eh->stub_type = arm_stub_none;
2710 eh->stub_template = NULL;
2711 eh->stub_template_size = 0;
2714 eh->output_name = NULL;
2720 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2721 shortcuts to them in our hash table. */
2724 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2726 struct elf32_arm_link_hash_table *htab;
2728 htab = elf32_arm_hash_table (info);
2732 /* BPABI objects never have a GOT, or associated sections. */
2733 if (htab->symbian_p)
2736 if (! _bfd_elf_create_got_section (dynobj, info))
2742 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2743 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our hash table. */
2747 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2749 struct elf32_arm_link_hash_table *htab;
2751 htab = elf32_arm_hash_table (info);
2755 if (!htab->root.sgot && !create_got_section (dynobj, info))
2758 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2761 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2763 htab->srelbss = bfd_get_section_by_name (dynobj,
2764 RELOC_SECTION (htab, ".bss"));
2766 if (htab->vxworks_p)
2768 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2773 htab->plt_header_size = 0;
2774 htab->plt_entry_size
2775 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2779 htab->plt_header_size
2780 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2781 htab->plt_entry_size
2782 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2786 if (!htab->root.splt
2787 || !htab->root.srelplt
2789 || (!info->shared && !htab->srelbss))
2795 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2798 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2799 struct elf_link_hash_entry *dir,
2800 struct elf_link_hash_entry *ind)
2802 struct elf32_arm_link_hash_entry *edir, *eind;
2804 edir = (struct elf32_arm_link_hash_entry *) dir;
2805 eind = (struct elf32_arm_link_hash_entry *) ind;
2807 if (eind->dyn_relocs != NULL)
2809 if (edir->dyn_relocs != NULL)
2811 struct elf_dyn_relocs **pp;
2812 struct elf_dyn_relocs *p;
2814 /* Add reloc counts against the indirect sym to the direct sym
2815 list. Merge any entries against the same section. */
2816 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
2818 struct elf_dyn_relocs *q;
2820 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2821 if (q->sec == p->sec)
2823 q->pc_count += p->pc_count;
2824 q->count += p->count;
2831 *pp = edir->dyn_relocs;
2834 edir->dyn_relocs = eind->dyn_relocs;
2835 eind->dyn_relocs = NULL;
2838 if (ind->root.type == bfd_link_hash_indirect)
2840 /* Copy over PLT info. */
2841 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2842 eind->plt_thumb_refcount = 0;
2843 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2844 eind->plt_maybe_thumb_refcount = 0;
2846 if (dir->got.refcount <= 0)
2848 edir->tls_type = eind->tls_type;
2849 eind->tls_type = GOT_UNKNOWN;
2853 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2856 /* Create an ARM elf linker hash table. */
2858 static struct bfd_link_hash_table *
2859 elf32_arm_link_hash_table_create (bfd *abfd)
2861 struct elf32_arm_link_hash_table *ret;
2862 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2864 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2868 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2869 elf32_arm_link_hash_newfunc,
2870 sizeof (struct elf32_arm_link_hash_entry),
2877 ret->sdynbss = NULL;
2878 ret->srelbss = NULL;
2879 ret->srelplt2 = NULL;
2880 ret->thumb_glue_size = 0;
2881 ret->arm_glue_size = 0;
2882 ret->bx_glue_size = 0;
2883 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2884 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2885 ret->vfp11_erratum_glue_size = 0;
2886 ret->num_vfp11_fixes = 0;
2887 ret->fix_cortex_a8 = 0;
2888 ret->bfd_of_glue_owner = NULL;
2889 ret->byteswap_code = 0;
2890 ret->target1_is_rel = 0;
2891 ret->target2_reloc = R_ARM_NONE;
2892 #ifdef FOUR_WORD_PLT
2893 ret->plt_header_size = 16;
2894 ret->plt_entry_size = 16;
2896 ret->plt_header_size = 20;
2897 ret->plt_entry_size = 12;
2904 ret->sym_cache.abfd = NULL;
2906 ret->tls_ldm_got.refcount = 0;
2907 ret->stub_bfd = NULL;
2908 ret->add_stub_section = NULL;
2909 ret->layout_sections_again = NULL;
2910 ret->stub_group = NULL;
2914 ret->input_list = NULL;
2916 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2917 sizeof (struct elf32_arm_stub_hash_entry)))
2923 return &ret->root.root;
2926 /* Free the derived linker hash table. */
2929 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2931 struct elf32_arm_link_hash_table *ret
2932 = (struct elf32_arm_link_hash_table *) hash;
2934 bfd_hash_table_free (&ret->stub_hash_table);
2935 _bfd_generic_link_hash_table_free (hash);
2938 /* Determine if we're dealing with a Thumb only architecture. */
2941 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2943 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2947 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
2950 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
2953 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2954 Tag_CPU_arch_profile);
2956 return profile == 'M';
2959 /* Determine if we're dealing with a Thumb-2 object. */
2962 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2964 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2966 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2969 /* Determine what kind of NOPs are available. */
2972 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
2974 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2976 return arch == TAG_CPU_ARCH_V6T2
2977 || arch == TAG_CPU_ARCH_V6K
2978 || arch == TAG_CPU_ARCH_V7
2979 || arch == TAG_CPU_ARCH_V7E_M;
2983 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
2985 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2987 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
2988 || arch == TAG_CPU_ARCH_V7E_M);
2992 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
2996 case arm_stub_long_branch_thumb_only:
2997 case arm_stub_long_branch_v4t_thumb_arm:
2998 case arm_stub_short_branch_v4t_thumb_arm:
2999 case arm_stub_long_branch_v4t_thumb_arm_pic:
3000 case arm_stub_long_branch_thumb_only_pic:
3011 /* Determine the type of stub needed, if any, for a call. */
3013 static enum elf32_arm_stub_type
3014 arm_type_of_stub (struct bfd_link_info *info,
3015 asection *input_sec,
3016 const Elf_Internal_Rela *rel,
3017 int *actual_st_type,
3018 struct elf32_arm_link_hash_entry *hash,
3019 bfd_vma destination,
3025 bfd_signed_vma branch_offset;
3026 unsigned int r_type;
3027 struct elf32_arm_link_hash_table * globals;
3030 enum elf32_arm_stub_type stub_type = arm_stub_none;
3032 int st_type = *actual_st_type;
3034 /* We don't know the actual type of destination in case it is of
3035 type STT_SECTION: give up. */
3036 if (st_type == STT_SECTION)
3039 globals = elf32_arm_hash_table (info);
3040 if (globals == NULL)
3043 thumb_only = using_thumb_only (globals);
3045 thumb2 = using_thumb2 (globals);
3047 /* Determine where the call point is. */
3048 location = (input_sec->output_offset
3049 + input_sec->output_section->vma
3052 r_type = ELF32_R_TYPE (rel->r_info);
3054 /* Keep a simpler condition, for the sake of clarity. */
3055 if (globals->root.splt != NULL
3057 && hash->root.plt.offset != (bfd_vma) -1)
3061 /* Note when dealing with PLT entries: the main PLT stub is in
3062 ARM mode, so if the branch is in Thumb mode, another
3063 Thumb->ARM stub will be inserted later just before the ARM
3064 PLT stub. We don't take this extra distance into account
3065 here, because if a long branch stub is needed, we'll add a
3066 Thumb->Arm one and branch directly to the ARM PLT entry
3067 because it avoids spreading offset corrections in several places. */
3070 destination = (globals->root.splt->output_section->vma
3071 + globals->root.splt->output_offset
3072 + hash->root.plt.offset);
3076 branch_offset = (bfd_signed_vma)(destination - location);
3078 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3080 /* Handle cases where:
3081 - this call goes too far (different Thumb/Thumb2 max
3082 displacement)
3083 - it's a Thumb->Arm call and blx is not available, or it's a
3084 Thumb->Arm branch (not bl). A stub is needed in this case,
3085 but only if this call is not through a PLT entry. Indeed,
3086 PLT stubs handle mode switching already. */
3089 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3090 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3092 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3093 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3094 || ((st_type != STT_ARM_TFUNC)
3095 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3096 || (r_type == R_ARM_THM_JUMP24))
3099 if (st_type == STT_ARM_TFUNC)
3101 /* Thumb to thumb. */
3104 stub_type = (info->shared | globals->pic_veneer)
3106 ? ((globals->use_blx
3107 && (r_type == R_ARM_THM_CALL))
3108 /* V5T and above. Stub starts with ARM code, so
3109 we must be able to switch mode before
3110 reaching it, which is only possible for 'bl'
3111 (ie R_ARM_THM_CALL relocation). */
3112 ? arm_stub_long_branch_any_thumb_pic
3113 /* On V4T, use Thumb code only. */
3114 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3116 /* non-PIC stubs. */
3117 : ((globals->use_blx
3118 && (r_type == R_ARM_THM_CALL))
3119 /* V5T and above. */
3120 ? arm_stub_long_branch_any_any
3122 : arm_stub_long_branch_v4t_thumb_thumb);
3126 stub_type = (info->shared | globals->pic_veneer)
3128 ? arm_stub_long_branch_thumb_only_pic
3130 : arm_stub_long_branch_thumb_only;
3137 && sym_sec->owner != NULL
3138 && !INTERWORK_FLAG (sym_sec->owner))
3140 (*_bfd_error_handler)
3141 (_("%B(%s): warning: interworking not enabled.\n"
3142 " first occurrence: %B: Thumb call to ARM"),
3143 sym_sec->owner, input_bfd, name);
3146 stub_type = (info->shared | globals->pic_veneer)
3148 ? ((globals->use_blx
3149 && (r_type == R_ARM_THM_CALL))
3150 /* V5T and above. */
3151 ? arm_stub_long_branch_any_arm_pic
3153 : arm_stub_long_branch_v4t_thumb_arm_pic)
3155 /* non-PIC stubs. */
3156 : ((globals->use_blx
3157 && (r_type == R_ARM_THM_CALL))
3158 /* V5T and above. */
3159 ? arm_stub_long_branch_any_any
3161 : arm_stub_long_branch_v4t_thumb_arm);
3163 /* Handle v4t short branches. */
3164 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3165 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3166 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3167 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3171 else if (r_type == R_ARM_CALL
3172 || r_type == R_ARM_JUMP24
3173 || r_type == R_ARM_PLT32)
3175 if (st_type == STT_ARM_TFUNC)
3180 && sym_sec->owner != NULL
3181 && !INTERWORK_FLAG (sym_sec->owner))
3183 (*_bfd_error_handler)
3184 (_("%B(%s): warning: interworking not enabled.\n"
3185 " first occurrence: %B: ARM call to Thumb"),
3186 sym_sec->owner, input_bfd, name);
3189 /* We have an extra 2 bytes of reach because of
3190 the mode change (bit 24 (H) of BLX encoding). */
3191 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3192 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3193 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3194 || (r_type == R_ARM_JUMP24)
3195 || (r_type == R_ARM_PLT32))
3197 stub_type = (info->shared | globals->pic_veneer)
3199 ? ((globals->use_blx)
3200 /* V5T and above. */
3201 ? arm_stub_long_branch_any_thumb_pic
3203 : arm_stub_long_branch_v4t_arm_thumb_pic)
3205 /* non-PIC stubs. */
3206 : ((globals->use_blx)
3207 /* V5T and above. */
3208 ? arm_stub_long_branch_any_any
3210 : arm_stub_long_branch_v4t_arm_thumb);
3216 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3217 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3219 stub_type = (info->shared | globals->pic_veneer)
3221 ? arm_stub_long_branch_any_arm_pic
3222 /* non-PIC stubs. */
3223 : arm_stub_long_branch_any_any;
3228 /* If a stub is needed, record the actual destination type. */
3229 if (stub_type != arm_stub_none)
3230 *actual_st_type = st_type;
3235 /* Build a name for an entry in the stub hash table. */
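/* Editorial note (illustrative, names invented): for a call to a global
   symbol the generated name follows "%08x_%s+%x_%d", e.g.
   "0000012a_printf+0_1"; for a local symbol it is keyed on the section id
   and symbol index instead, e.g. "0000012a_45:7+0_1".  */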
3238 elf32_arm_stub_name (const asection *input_section,
3239 const asection *sym_sec,
3240 const struct elf32_arm_link_hash_entry *hash,
3241 const Elf_Internal_Rela *rel,
3242 enum elf32_arm_stub_type stub_type)
3249 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3250 stub_name = (char *) bfd_malloc (len);
3251 if (stub_name != NULL)
3252 sprintf (stub_name, "%08x_%s+%x_%d",
3253 input_section->id & 0xffffffff,
3254 hash->root.root.root.string,
3255 (int) rel->r_addend & 0xffffffff,
3260 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3261 stub_name = (char *) bfd_malloc (len);
3262 if (stub_name != NULL)
3263 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3264 input_section->id & 0xffffffff,
3265 sym_sec->id & 0xffffffff,
3266 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3267 (int) rel->r_addend & 0xffffffff,
3274 /* Look up an entry in the stub hash. Stub entries are cached because
3275 creating the stub name takes a bit of time. */
3277 static struct elf32_arm_stub_hash_entry *
3278 elf32_arm_get_stub_entry (const asection *input_section,
3279 const asection *sym_sec,
3280 struct elf_link_hash_entry *hash,
3281 const Elf_Internal_Rela *rel,
3282 struct elf32_arm_link_hash_table *htab,
3283 enum elf32_arm_stub_type stub_type)
3285 struct elf32_arm_stub_hash_entry *stub_entry;
3286 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3287 const asection *id_sec;
3289 if ((input_section->flags & SEC_CODE) == 0)
3292 /* If this input section is part of a group of sections sharing one
3293 stub section, then use the id of the first section in the group.
3294 Stub names need to include a section id, as there may well be
3295 more than one stub used to reach say, printf, and we need to
3296 distinguish between them. */
3297 id_sec = htab->stub_group[input_section->id].link_sec;
3299 if (h != NULL && h->stub_cache != NULL
3300 && h->stub_cache->h == h
3301 && h->stub_cache->id_sec == id_sec
3302 && h->stub_cache->stub_type == stub_type)
3304 stub_entry = h->stub_cache;
3310 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3311 if (stub_name == NULL)
3314 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3315 stub_name, FALSE, FALSE);
3317 h->stub_cache = stub_entry;
3325 /* Find or create a stub section. Returns a pointer to the stub section, and
3326 the section to which the stub section will be attached (in *LINK_SEC_P).
3327 LINK_SEC_P may be NULL. */
3330 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3331 struct elf32_arm_link_hash_table *htab)
3336 link_sec = htab->stub_group[section->id].link_sec;
3337 stub_sec = htab->stub_group[section->id].stub_sec;
3338 if (stub_sec == NULL)
3340 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3341 if (stub_sec == NULL)
3347 namelen = strlen (link_sec->name);
3348 len = namelen + sizeof (STUB_SUFFIX);
3349 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3353 memcpy (s_name, link_sec->name, namelen);
3354 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3355 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3356 if (stub_sec == NULL)
3358 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3360 htab->stub_group[section->id].stub_sec = stub_sec;
3364 *link_sec_p = link_sec;
3369 /* Add a new stub entry to the stub hash. Not all fields of the new
3370 stub entry are initialised. */
3372 static struct elf32_arm_stub_hash_entry *
3373 elf32_arm_add_stub (const char *stub_name,
3375 struct elf32_arm_link_hash_table *htab)
3379 struct elf32_arm_stub_hash_entry *stub_entry;
3381 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3382 if (stub_sec == NULL)
3385 /* Enter this entry into the linker stub hash table. */
3386 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3388 if (stub_entry == NULL)
3390 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3396 stub_entry->stub_sec = stub_sec;
3397 stub_entry->stub_offset = 0;
3398 stub_entry->id_sec = link_sec;
3403 /* Store an Arm insn into an output section not processed by
3404 elf32_arm_write_section. */
3407 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3408 bfd * output_bfd, bfd_vma val, void * ptr)
3410 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3411 bfd_putl32 (val, ptr);
3412 else
3413 bfd_putb32 (val, ptr);
3416 /* Store a 16-bit Thumb insn into an output section not processed by
3417 elf32_arm_write_section. */
3420 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3421 bfd * output_bfd, bfd_vma val, void * ptr)
3423 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3424 bfd_putl16 (val, ptr);
3425 else
3426 bfd_putb16 (val, ptr);
3429 static bfd_reloc_status_type elf32_arm_final_link_relocate
3430 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3431 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3432 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3435 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
3439 case arm_stub_a8_veneer_b_cond:
3440 case arm_stub_a8_veneer_b:
3441 case arm_stub_a8_veneer_bl:
3444 case arm_stub_long_branch_any_any:
3445 case arm_stub_long_branch_v4t_arm_thumb:
3446 case arm_stub_long_branch_thumb_only:
3447 case arm_stub_long_branch_v4t_thumb_thumb:
3448 case arm_stub_long_branch_v4t_thumb_arm:
3449 case arm_stub_short_branch_v4t_thumb_arm:
3450 case arm_stub_long_branch_any_arm_pic:
3451 case arm_stub_long_branch_any_thumb_pic:
3452 case arm_stub_long_branch_v4t_thumb_thumb_pic:
3453 case arm_stub_long_branch_v4t_arm_thumb_pic:
3454 case arm_stub_long_branch_v4t_thumb_arm_pic:
3455 case arm_stub_long_branch_thumb_only_pic:
3456 case arm_stub_a8_veneer_blx:
3460 abort (); /* Should be unreachable. */
3465 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3469 struct elf32_arm_stub_hash_entry *stub_entry;
3470 struct elf32_arm_link_hash_table *globals;
3471 struct bfd_link_info *info;
3478 const insn_sequence *template_sequence;
3480 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3481 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3484 /* Massage our args to the form they really have. */
3485 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3486 info = (struct bfd_link_info *) in_arg;
3488 globals = elf32_arm_hash_table (info);
3489 if (globals == NULL)
3492 stub_sec = stub_entry->stub_sec;
3494 if ((globals->fix_cortex_a8 < 0)
3495 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
3496 /* We have to do less-strictly-aligned fixes last. */
3499 /* Make a note of the offset within the stubs for this entry. */
3500 stub_entry->stub_offset = stub_sec->size;
3501 loc = stub_sec->contents + stub_entry->stub_offset;
3503 stub_bfd = stub_sec->owner;
3505 /* This is the address of the stub destination. */
3506 sym_value = (stub_entry->target_value
3507 + stub_entry->target_section->output_offset
3508 + stub_entry->target_section->output_section->vma);
3510 template_sequence = stub_entry->stub_template;
3511 template_size = stub_entry->stub_template_size;
3514 for (i = 0; i < template_size; i++)
3516 switch (template_sequence[i].type)
3520 bfd_vma data = (bfd_vma) template_sequence[i].data;
3521 if (template_sequence[i].reloc_addend != 0)
3523 /* We've borrowed the reloc_addend field to mean we should
3524 insert a condition code into this (Thumb-1 branch)
3525 instruction. See THUMB16_BCOND_INSN. */
3526 BFD_ASSERT ((data & 0xff00) == 0xd000);
3527 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3529 bfd_put_16 (stub_bfd, data, loc + size);
3535 bfd_put_16 (stub_bfd,
3536 (template_sequence[i].data >> 16) & 0xffff,
3538 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
3540 if (template_sequence[i].r_type != R_ARM_NONE)
3542 stub_reloc_idx[nrelocs] = i;
3543 stub_reloc_offset[nrelocs++] = size;
3549 bfd_put_32 (stub_bfd, template_sequence[i].data,
3551 /* Handle cases where the target is encoded within the instruction. */
3553 if (template_sequence[i].r_type == R_ARM_JUMP24)
3555 stub_reloc_idx[nrelocs] = i;
3556 stub_reloc_offset[nrelocs++] = size;
3562 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3563 stub_reloc_idx[nrelocs] = i;
3564 stub_reloc_offset[nrelocs++] = size;
3574 stub_sec->size += size;
3576 /* Stub size has already been computed in arm_size_one_stub. Check consistency. */
3578 BFD_ASSERT (size == stub_entry->stub_size);
3580 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3581 if (stub_entry->st_type == STT_ARM_TFUNC)
3584 /* Assume there is at least one and at most MAXRELOCS entries to relocate in each stub. */
3586 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3588 for (i = 0; i < nrelocs; i++)
3589 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3590 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3591 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3592 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3594 Elf_Internal_Rela rel;
3595 bfd_boolean unresolved_reloc;
3596 char *error_message;
3598 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3599 ? STT_ARM_TFUNC : 0;
3600 bfd_vma points_to = sym_value + stub_entry->target_addend;
3602 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3603 rel.r_info = ELF32_R_INFO (0,
3604 template_sequence[stub_reloc_idx[i]].r_type);
3605 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3607 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3608 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3609 template should refer back to the instruction after the original branch. */
3611 points_to = sym_value;
3613 /* There may be unintended consequences if this is not true. */
3614 BFD_ASSERT (stub_entry->h == NULL);
3616 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3617 properly. We should probably use this function unconditionally,
3618 rather than only for certain relocations listed in the enclosing
3619 conditional, for the sake of consistency. */
3620 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3621 (template_sequence[stub_reloc_idx[i]].r_type),
3622 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3623 points_to, info, stub_entry->target_section, "", sym_flags,
3624 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3629 Elf_Internal_Rela rel;
3630 bfd_boolean unresolved_reloc;
3631 char *error_message;
3632 bfd_vma points_to = sym_value + stub_entry->target_addend
3633 + template_sequence[stub_reloc_idx[i]].reloc_addend;
3635 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3636 rel.r_info = ELF32_R_INFO (0,
3637 template_sequence[stub_reloc_idx[i]].r_type);
3640 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3641 (template_sequence[stub_reloc_idx[i]].r_type),
3642 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3643 points_to, info, stub_entry->target_section, "", stub_entry->st_type,
3644 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3652 /* Calculate the template, template size and instruction size for a stub.
3653 Return value is the instruction size. */
3656 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3657 const insn_sequence **stub_template,
3658 int *stub_template_size)
3660 const insn_sequence *template_sequence = NULL;
3661 int template_size = 0, i;
3664 template_sequence = stub_definitions[stub_type].template_sequence;
3666 *stub_template = template_sequence;
3668 template_size = stub_definitions[stub_type].template_size;
3669 if (stub_template_size)
3670 *stub_template_size = template_size;
3673 for (i = 0; i < template_size; i++)
3675 switch (template_sequence[i].type)
3696 /* As above, but don't actually build the stub. Just bump offset so
3697 we know stub section sizes. */
3700 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3701 void *in_arg ATTRIBUTE_UNUSED)
3703 struct elf32_arm_stub_hash_entry *stub_entry;
3704 const insn_sequence *template_sequence;
3705 int template_size, size;
3707 /* Massage our args to the form they really have. */
3708 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3710 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3711 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3713 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3716 stub_entry->stub_size = size;
3717 stub_entry->stub_template = template_sequence;
3718 stub_entry->stub_template_size = template_size;
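/* Round each stub's size up to a multiple of 8 bytes so that the next stub
   placed in this section starts on an 8-byte boundary.  */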
3720 size = (size + 7) & ~7;
3721 stub_entry->stub_sec->size += size;
3726 /* External entry points for sizing and building linker stubs. */
3728 /* Set up various things so that we can make a list of input sections
3729 for each output section included in the link. Returns -1 on error,
3730 0 when no stubs will be needed, and 1 on success. */
3733 elf32_arm_setup_section_lists (bfd *output_bfd,
3734 struct bfd_link_info *info)
3737 unsigned int bfd_count;
3738 int top_id, top_index;
3740 asection **input_list, **list;
3742 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3746 if (! is_elf_hash_table (htab))
3749 /* Count the number of input BFDs and find the top input section id. */
3750 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3752 input_bfd = input_bfd->link_next)
3755 for (section = input_bfd->sections;
3757 section = section->next)
3759 if (top_id < section->id)
3760 top_id = section->id;
3763 htab->bfd_count = bfd_count;
3765 amt = sizeof (struct map_stub) * (top_id + 1);
3766 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3767 if (htab->stub_group == NULL)
3769 htab->top_id = top_id;
3771 /* We can't use output_bfd->section_count here to find the top output
3772 section index as some sections may have been removed, and
3773 _bfd_strip_section_from_output doesn't renumber the indices. */
3774 for (section = output_bfd->sections, top_index = 0;
3776 section = section->next)
3778 if (top_index < section->index)
3779 top_index = section->index;
3782 htab->top_index = top_index;
3783 amt = sizeof (asection *) * (top_index + 1);
3784 input_list = (asection **) bfd_malloc (amt);
3785 htab->input_list = input_list;
3786 if (input_list == NULL)
3789 /* For sections we aren't interested in, mark their entries with a
3790 value we can check later. */
3791 list = input_list + top_index;
3793 *list = bfd_abs_section_ptr;
3794 while (list-- != input_list);
3796 for (section = output_bfd->sections;
3798 section = section->next)
3800 if ((section->flags & SEC_CODE) != 0)
3801 input_list[section->index] = NULL;
3807 /* The linker repeatedly calls this function for each input section,
3808 in the order that input sections are linked into output sections.
3809 Build lists of input sections to determine groupings between which
3810 we may insert linker stubs. */
3813 elf32_arm_next_input_section (struct bfd_link_info *info,
3816 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3821 if (isec->output_section->index <= htab->top_index)
3823 asection **list = htab->input_list + isec->output_section->index;
3825 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3827 /* Steal the link_sec pointer for our list. */
3828 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3829 /* This happens to make the list in reverse order,
3830 which we reverse later. */
3831 PREV_SEC (isec) = *list;
3837 /* See whether we can group stub sections together. Grouping stub
3838 sections may result in fewer stubs. More importantly, we need to
3839 put all .init* and .fini* stubs at the end of the .init or
3840 .fini output sections respectively, because glibc splits the
3841 _init and _fini functions into multiple parts. Putting a stub in
3842 the middle of a function is not a good idea. */
3845 group_sections (struct elf32_arm_link_hash_table *htab,
3846 bfd_size_type stub_group_size,
3847 bfd_boolean stubs_always_after_branch)
3849 asection **list = htab->input_list;
3853 asection *tail = *list;
3856 if (tail == bfd_abs_section_ptr)
3859 /* Reverse the list: we must avoid placing stubs at the
3860 beginning of the section because the beginning of the text
3861 section may be required for an interrupt vector in bare metal code. */
3863 #define NEXT_SEC PREV_SEC
3865 while (tail != NULL)
3867 /* Pop from tail. */
3868 asection *item = tail;
3869 tail = PREV_SEC (item);
3872 NEXT_SEC (item) = head;
3876 while (head != NULL)
3880 bfd_vma stub_group_start = head->output_offset;
3881 bfd_vma end_of_next;
3884 while (NEXT_SEC (curr) != NULL)
3886 next = NEXT_SEC (curr);
3887 end_of_next = next->output_offset + next->size;
3888 if (end_of_next - stub_group_start >= stub_group_size)
3889 /* End of NEXT is too far from start, so stop. */
3891 /* Add NEXT to the group. */
3895 /* OK, the size from the start to the start of CURR is less
3896 than stub_group_size and thus can be handled by one stub
3897 section. (Or the head section is itself larger than
3898 stub_group_size, in which case we may be toast.)
3899 We should really be keeping track of the total size of
3900 stubs added here, as stubs contribute to the final output section size. */
3904 next = NEXT_SEC (head);
3905 /* Set up this stub group. */
3906 htab->stub_group[head->id].link_sec = curr;
3908 while (head != curr && (head = next) != NULL);
3910 /* But wait, there's more! Input sections up to stub_group_size
3911 bytes after the stub section can be handled by it too. */
3912 if (!stubs_always_after_branch)
3914 stub_group_start = curr->output_offset + curr->size;
3916 while (next != NULL)
3918 end_of_next = next->output_offset + next->size;
3919 if (end_of_next - stub_group_start >= stub_group_size)
3920 /* End of NEXT is too far from stubs, so stop. */
3922 /* Add NEXT to the stub group. */
3924 next = NEXT_SEC (head);
3925 htab->stub_group[head->id].link_sec = curr;
3931 while (list++ != htab->input_list + htab->top_index);
3933 free (htab->input_list);
3938 /* Comparison function for sorting/searching relocations relating to the Cortex-A8 erratum. */
3942 a8_reloc_compare (const void *a, const void *b)
3944 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3945 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3947 if (ra->from < rb->from)
3949 else if (ra->from > rb->from)
3955 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3956 const char *, char **);
3958 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3959 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3960 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false otherwise. */
3964 cortex_a8_erratum_scan (bfd *input_bfd,
3965 struct bfd_link_info *info,
3966 struct a8_erratum_fix **a8_fixes_p,
3967 unsigned int *num_a8_fixes_p,
3968 unsigned int *a8_fix_table_size_p,
3969 struct a8_erratum_reloc *a8_relocs,
3970 unsigned int num_a8_relocs,
3971 unsigned prev_num_a8_fixes,
3972 bfd_boolean *stub_changed_p)
3975 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3976 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3977 unsigned int num_a8_fixes = *num_a8_fixes_p;
3978 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3983 for (section = input_bfd->sections;
3985 section = section->next)
3987 bfd_byte *contents = NULL;
3988 struct _arm_elf_section_data *sec_data;
3992 if (elf_section_type (section) != SHT_PROGBITS
3993 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3994 || (section->flags & SEC_EXCLUDE) != 0
3995 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3996 || (section->output_section == bfd_abs_section_ptr))
3999 base_vma = section->output_section->vma + section->output_offset;
4001 if (elf_section_data (section)->this_hdr.contents != NULL)
4002 contents = elf_section_data (section)->this_hdr.contents;
4003 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4006 sec_data = elf32_arm_section_data (section);
4008 for (span = 0; span < sec_data->mapcount; span++)
4010 unsigned int span_start = sec_data->map[span].vma;
4011 unsigned int span_end = (span == sec_data->mapcount - 1)
4012 ? section->size : sec_data->map[span + 1].vma;
4014 char span_type = sec_data->map[span].type;
4015 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4017 if (span_type != 't')
4020 /* Span is entirely within a single 4KB region: skip scanning. */
4021 if (((base_vma + span_start) & ~0xfff)
4022 == ((base_vma + span_end) & ~0xfff))
4025 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4027 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4028 * The branch target is in the same 4KB region as the
4029 first half of the branch.
4030 * The instruction before the branch is a 32-bit
4031 length non-branch instruction. */
4032 for (i = span_start; i < span_end;)
4034 unsigned int insn = bfd_getl16 (&contents[i]);
4035 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4036 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4038 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4043 /* Load the rest of the insn (in manual-friendly order). */
4044 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4046 /* Encoding T4: B<c>.W. */
4047 is_b = (insn & 0xf800d000) == 0xf0009000;
4048 /* Encoding T1: BL<c>.W. */
4049 is_bl = (insn & 0xf800d000) == 0xf000d000;
4050 /* Encoding T2: BLX<c>.W. */
4051 is_blx = (insn & 0xf800d000) == 0xf000c000;
4052 /* Encoding T3: B<c>.W (not permitted in IT block). */
4053 is_bcc = (insn & 0xf800d000) == 0xf0008000
4054 && (insn & 0x07f00000) != 0x03800000;
4057 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4059 if (((base_vma + i) & 0xfff) == 0xffe
4063 && ! last_was_branch)
4065 bfd_signed_vma offset = 0;
4066 bfd_boolean force_target_arm = FALSE;
4067 bfd_boolean force_target_thumb = FALSE;
4069 enum elf32_arm_stub_type stub_type = arm_stub_none;
4070 struct a8_erratum_reloc key, *found;
4072 key.from = base_vma + i;
4073 found = (struct a8_erratum_reloc *)
4074 bsearch (&key, a8_relocs, num_a8_relocs,
4075 sizeof (struct a8_erratum_reloc),
4080 char *error_message = NULL;
4081 struct elf_link_hash_entry *entry;
4082 bfd_boolean use_plt = FALSE;
4084 /* We don't care about the error returned from this
4085 function, only if there is glue or not. */
4086 entry = find_thumb_glue (info, found->sym_name,
4090 found->non_a8_stub = TRUE;
4092 /* Keep a simpler condition, for the sake of clarity. */
4093 if (htab->root.splt != NULL && found->hash != NULL
4094 && found->hash->root.plt.offset != (bfd_vma) -1)
4097 if (found->r_type == R_ARM_THM_CALL)
4099 if (found->st_type != STT_ARM_TFUNC || use_plt)
4100 force_target_arm = TRUE;
4102 force_target_thumb = TRUE;
4106 /* Check if we have an offending branch instruction. */
4108 if (found && found->non_a8_stub)
4109 /* We've already made a stub for this instruction, e.g.
4110 it's a long branch or a Thumb->ARM stub. Assume that
4111 stub will suffice to work around the A8 erratum (see
4112 setting of always_after_branch above). */
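/* The code below reassembles the Bcc.W (encoding T3) branch offset from its
   scattered fields: imm11 (bits 0-10), imm6 (bits 16-21), J1 (bit 13),
   J2 (bit 11) and S (bit 26), i.e. SignExtend(S:J2:J1:imm6:imm11:0).  */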
4116 offset = (insn & 0x7ff) << 1;
4117 offset |= (insn & 0x3f0000) >> 4;
4118 offset |= (insn & 0x2000) ? 0x40000 : 0;
4119 offset |= (insn & 0x800) ? 0x80000 : 0;
4120 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4121 if (offset & 0x100000)
4122 offset |= ~ ((bfd_signed_vma) 0xfffff);
4123 stub_type = arm_stub_a8_veneer_b_cond;
4125 else if (is_b || is_bl || is_blx)
4127 int s = (insn & 0x4000000) != 0;
4128 int j1 = (insn & 0x2000) != 0;
4129 int j2 = (insn & 0x800) != 0;
4133 offset = (insn & 0x7ff) << 1;
4134 offset |= (insn & 0x3ff0000) >> 4;
4138 if (offset & 0x1000000)
4139 offset |= ~ ((bfd_signed_vma) 0xffffff);
4142 offset &= ~ ((bfd_signed_vma) 3);
4144 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4145 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4148 if (stub_type != arm_stub_none)
4150 bfd_vma pc_for_insn = base_vma + i + 4;
4152 /* The original instruction is a BL, but the target is
4153 an ARM instruction. If we were not making a stub,
4154 the BL would have been converted to a BLX. Use the
4155 BLX stub instead in that case. */
4156 if (htab->use_blx && force_target_arm
4157 && stub_type == arm_stub_a8_veneer_bl)
4159 stub_type = arm_stub_a8_veneer_blx;
4163 /* Conversely, if the original instruction was
4164 BLX but the target is Thumb mode, use the BL stub. */
4166 else if (force_target_thumb
4167 && stub_type == arm_stub_a8_veneer_blx)
4169 stub_type = arm_stub_a8_veneer_bl;
4175 pc_for_insn &= ~ ((bfd_vma) 3);
4177 /* If we found a relocation, use the proper destination,
4178 not the offset in the (unrelocated) instruction.
4179 Note this is always done if we switched the stub type above. */
4183 (bfd_signed_vma) (found->destination - pc_for_insn);
4185 target = pc_for_insn + offset;
4187 /* The BLX stub is ARM-mode code. Adjust the offset to
4188 take the different PC value (+8 instead of +4) into account. */
4190 if (stub_type == arm_stub_a8_veneer_blx)
4193 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4195 char *stub_name = NULL;
4197 if (num_a8_fixes == a8_fix_table_size)
4199 a8_fix_table_size *= 2;
4200 a8_fixes = (struct a8_erratum_fix *)
4201 bfd_realloc (a8_fixes,
4202 sizeof (struct a8_erratum_fix)
4203 * a8_fix_table_size);
4206 if (num_a8_fixes < prev_num_a8_fixes)
4208 /* If we're doing a subsequent scan,
4209 check if we've found the same fix as
4210 before, and try to reuse the stub name. */
4212 stub_name = a8_fixes[num_a8_fixes].stub_name;
4213 if ((a8_fixes[num_a8_fixes].section != section)
4214 || (a8_fixes[num_a8_fixes].offset != i))
4218 *stub_changed_p = TRUE;
4224 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4225 if (stub_name != NULL)
4226 sprintf (stub_name, "%x:%x", section->id, i);
4229 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4230 a8_fixes[num_a8_fixes].section = section;
4231 a8_fixes[num_a8_fixes].offset = i;
4232 a8_fixes[num_a8_fixes].addend = offset;
4233 a8_fixes[num_a8_fixes].orig_insn = insn;
4234 a8_fixes[num_a8_fixes].stub_name = stub_name;
4235 a8_fixes[num_a8_fixes].stub_type = stub_type;
4236 a8_fixes[num_a8_fixes].st_type =
4237 is_blx ? STT_FUNC : STT_ARM_TFUNC;
4244 i += insn_32bit ? 4 : 2;
4245 last_was_32bit = insn_32bit;
4246 last_was_branch = is_32bit_branch;
4250 if (elf_section_data (section)->this_hdr.contents == NULL)
4254 *a8_fixes_p = a8_fixes;
4255 *num_a8_fixes_p = num_a8_fixes;
4256 *a8_fix_table_size_p = a8_fix_table_size;
4261 /* Determine and set the size of the stub section for a final link.
4263 The basic idea here is to examine all the relocations looking for
4264 PC-relative calls to a target that is unreachable with a "bl" instruction. */
4268 elf32_arm_size_stubs (bfd *output_bfd,
4270 struct bfd_link_info *info,
4271 bfd_signed_vma group_size,
4272 asection * (*add_stub_section) (const char *, asection *),
4273 void (*layout_sections_again) (void))
4275 bfd_size_type stub_group_size;
4276 bfd_boolean stubs_always_after_branch;
4277 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4278 struct a8_erratum_fix *a8_fixes = NULL;
4279 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4280 struct a8_erratum_reloc *a8_relocs = NULL;
4281 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4286 if (htab->fix_cortex_a8)
4288 a8_fixes = (struct a8_erratum_fix *)
4289 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4290 a8_relocs = (struct a8_erratum_reloc *)
4291 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4294 /* Propagate mach to stub bfd, because it may not have been
4295 finalized when we created stub_bfd. */
4296 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4297 bfd_get_mach (output_bfd));
4299 /* Stash our params away. */
4300 htab->stub_bfd = stub_bfd;
4301 htab->add_stub_section = add_stub_section;
4302 htab->layout_sections_again = layout_sections_again;
4303 stubs_always_after_branch = group_size < 0;
4305 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4306 as the first half of a 32-bit branch straddling two 4K pages. This is a
4307 crude way of enforcing that. */
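/* Concretely, a 32-bit Thumb branch whose first halfword is at offset
   0xffe within a 4K page has its second halfword on the next page;
   that is the ((address & 0xfff) == 0xffe) test used by
   cortex_a8_erratum_scan above.  */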
4308 if (htab->fix_cortex_a8)
4309 stubs_always_after_branch = 1;
4312 stub_group_size = -group_size;
4314 stub_group_size = group_size;
4316 if (stub_group_size == 1)
4318 /* Default values. */
4319 /* The Thumb branch range of +-4MB has to be used as the default
4320 maximum size (a given section can contain both ARM and Thumb
4321 code, so the worst case has to be taken into account).
4323 This value is 24K less than that, which allows for 2025
4324 12-byte stubs. If we exceed that, then we will fail to link.
4325 The user will have to relink with an explicit group size option. */
4327 stub_group_size = 4170000;
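/* Roughly: 4 MB is 4194304 bytes; reserving 2025 stubs * 12 bytes
   = 24300 bytes of stub space leaves 4170004 bytes, rounded down to
   the 4170000 used here.  */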
4330 group_sections (htab, stub_group_size, stubs_always_after_branch);
4332 /* If we're applying the cortex A8 fix, we need to determine the
4333 program header size now, because we cannot change it later --
4334 that could alter section placements. Notice the A8 erratum fix
4335 ends up requiring the section addresses to remain unchanged
4336 modulo the page size. That's something we cannot represent
4337 inside BFD, and we don't want to force the section alignment to
4338 be the page size. */
4339 if (htab->fix_cortex_a8)
4340 (*htab->layout_sections_again) ();
4345 unsigned int bfd_indx;
4347 bfd_boolean stub_changed = FALSE;
4348 unsigned prev_num_a8_fixes = num_a8_fixes;
4351 for (input_bfd = info->input_bfds, bfd_indx = 0;
4353 input_bfd = input_bfd->link_next, bfd_indx++)
4355 Elf_Internal_Shdr *symtab_hdr;
4357 Elf_Internal_Sym *local_syms = NULL;
4361 /* We'll need the symbol table in a second. */
4362 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4363 if (symtab_hdr->sh_info == 0)
4366 /* Walk over each section attached to the input bfd. */
4367 for (section = input_bfd->sections;
4369 section = section->next)
4371 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4373 /* If there aren't any relocs, then there's nothing more to do. */
4375 if ((section->flags & SEC_RELOC) == 0
4376 || section->reloc_count == 0
4377 || (section->flags & SEC_CODE) == 0)
4380 /* If this section is a link-once section that will be
4381 discarded, then don't create any stubs. */
4382 if (section->output_section == NULL
4383 || section->output_section->owner != output_bfd)
4386 /* Get the relocs. */
4388 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4389 NULL, info->keep_memory);
4390 if (internal_relocs == NULL)
4391 goto error_ret_free_local;
4393 /* Now examine each relocation. */
4394 irela = internal_relocs;
4395 irelaend = irela + section->reloc_count;
4396 for (; irela < irelaend; irela++)
4398 unsigned int r_type, r_indx;
4399 enum elf32_arm_stub_type stub_type;
4400 struct elf32_arm_stub_hash_entry *stub_entry;
4403 bfd_vma destination;
4404 struct elf32_arm_link_hash_entry *hash;
4405 const char *sym_name;
4407 const asection *id_sec;
4409 bfd_boolean created_stub = FALSE;
4411 r_type = ELF32_R_TYPE (irela->r_info);
4412 r_indx = ELF32_R_SYM (irela->r_info);
4414 if (r_type >= (unsigned int) R_ARM_max)
4416 bfd_set_error (bfd_error_bad_value);
4417 error_ret_free_internal:
4418 if (elf_section_data (section)->relocs == NULL)
4419 free (internal_relocs);
4420 goto error_ret_free_local;
4423 /* Only look for stubs on branch instructions. */
4424 if ((r_type != (unsigned int) R_ARM_CALL)
4425 && (r_type != (unsigned int) R_ARM_THM_CALL)
4426 && (r_type != (unsigned int) R_ARM_JUMP24)
4427 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4428 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4429 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4430 && (r_type != (unsigned int) R_ARM_PLT32))
4433 /* Now determine the call target, its name, value, and section. */
4440 if (r_indx < symtab_hdr->sh_info)
4442 /* It's a local symbol. */
4443 Elf_Internal_Sym *sym;
4445 if (local_syms == NULL)
4448 = (Elf_Internal_Sym *) symtab_hdr->contents;
4449 if (local_syms == NULL)
4451 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4452 symtab_hdr->sh_info, 0,
4454 if (local_syms == NULL)
4455 goto error_ret_free_internal;
4458 sym = local_syms + r_indx;
4459 if (sym->st_shndx == SHN_UNDEF)
4460 sym_sec = bfd_und_section_ptr;
4461 else if (sym->st_shndx == SHN_ABS)
4462 sym_sec = bfd_abs_section_ptr;
4463 else if (sym->st_shndx == SHN_COMMON)
4464 sym_sec = bfd_com_section_ptr;
4467 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
4470 /* This is an undefined symbol. It can never be resolved. */
4474 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4475 sym_value = sym->st_value;
4476 destination = (sym_value + irela->r_addend
4477 + sym_sec->output_offset
4478 + sym_sec->output_section->vma);
4479 st_type = ELF_ST_TYPE (sym->st_info);
4481 = bfd_elf_string_from_elf_section (input_bfd,
4482 symtab_hdr->sh_link,
4487 /* It's an external symbol. */
4490 e_indx = r_indx - symtab_hdr->sh_info;
4491 hash = ((struct elf32_arm_link_hash_entry *)
4492 elf_sym_hashes (input_bfd)[e_indx]);
4494 while (hash->root.root.type == bfd_link_hash_indirect
4495 || hash->root.root.type == bfd_link_hash_warning)
4496 hash = ((struct elf32_arm_link_hash_entry *)
4497 hash->root.root.u.i.link);
4499 if (hash->root.root.type == bfd_link_hash_defined
4500 || hash->root.root.type == bfd_link_hash_defweak)
4502 sym_sec = hash->root.root.u.def.section;
4503 sym_value = hash->root.root.u.def.value;
4505 struct elf32_arm_link_hash_table *globals =
4506 elf32_arm_hash_table (info);
4508 /* For a destination in a shared library,
4509 use the PLT stub as target address to
4510 decide whether a branch stub is needed. */
4513 && globals->root.splt != NULL
4515 && hash->root.plt.offset != (bfd_vma) -1)
4517 sym_sec = globals->root.splt;
4518 sym_value = hash->root.plt.offset;
4519 if (sym_sec->output_section != NULL)
4520 destination = (sym_value
4521 + sym_sec->output_offset
4522 + sym_sec->output_section->vma);
4524 else if (sym_sec->output_section != NULL)
4525 destination = (sym_value + irela->r_addend
4526 + sym_sec->output_offset
4527 + sym_sec->output_section->vma);
4529 else if ((hash->root.root.type == bfd_link_hash_undefined)
4530 || (hash->root.root.type == bfd_link_hash_undefweak))
4532 /* For a shared library, use the PLT stub as
4533 target address to decide whether a long
4534 branch stub is needed.
4535 For absolute code, such branches cannot be handled. */
4536 struct elf32_arm_link_hash_table *globals =
4537 elf32_arm_hash_table (info);
4540 && globals->root.splt != NULL
4542 && hash->root.plt.offset != (bfd_vma) -1)
4544 sym_sec = globals->root.splt;
4545 sym_value = hash->root.plt.offset;
4546 if (sym_sec->output_section != NULL)
4547 destination = (sym_value
4548 + sym_sec->output_offset
4549 + sym_sec->output_section->vma);
4556 bfd_set_error (bfd_error_bad_value);
4557 goto error_ret_free_internal;
4559 st_type = ELF_ST_TYPE (hash->root.type);
4560 sym_name = hash->root.root.root.string;
4565 /* Determine what (if any) linker stub is needed. */
4566 stub_type = arm_type_of_stub (info, section, irela,
4568 destination, sym_sec,
4569 input_bfd, sym_name);
4570 if (stub_type == arm_stub_none)
4573 /* Support for grouping stub sections. */
4574 id_sec = htab->stub_group[section->id].link_sec;
4576 /* Get the name of this stub. */
4577 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4580 goto error_ret_free_internal;
4582 /* We've either created a stub for this reloc already,
4583 or we are about to. */
4584 created_stub = TRUE;
4586 stub_entry = arm_stub_hash_lookup
4587 (&htab->stub_hash_table, stub_name,
4589 if (stub_entry != NULL)
4591 /* The proper stub has already been created. */
4593 stub_entry->target_value = sym_value;
4597 stub_entry = elf32_arm_add_stub (stub_name, section,
4599 if (stub_entry == NULL)
4602 goto error_ret_free_internal;
4605 stub_entry->target_value = sym_value;
4606 stub_entry->target_section = sym_sec;
4607 stub_entry->stub_type = stub_type;
4608 stub_entry->h = hash;
4609 stub_entry->st_type = st_type;
4611 if (sym_name == NULL)
4612 sym_name = "unnamed";
4613 stub_entry->output_name = (char *)
4614 bfd_alloc (htab->stub_bfd,
4615 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4616 + strlen (sym_name));
4617 if (stub_entry->output_name == NULL)
4620 goto error_ret_free_internal;
4623 /* For historical reasons, use the existing names for
4624 ARM-to-Thumb and Thumb-to-ARM stubs. */
4625 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4626 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4627 && st_type != STT_ARM_TFUNC)
4628 sprintf (stub_entry->output_name,
4629 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4630 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4631 || (r_type == (unsigned int) R_ARM_JUMP24))
4632 && st_type == STT_ARM_TFUNC)
4633 sprintf (stub_entry->output_name,
4634 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4636 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4639 stub_changed = TRUE;
4643 /* Look for relocations which might trigger the Cortex-A8 erratum. */
4645 if (htab->fix_cortex_a8
4646 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4647 || r_type == (unsigned int) R_ARM_THM_JUMP19
4648 || r_type == (unsigned int) R_ARM_THM_CALL
4649 || r_type == (unsigned int) R_ARM_THM_XPC22))
4651 bfd_vma from = section->output_section->vma
4652 + section->output_offset
4655 if ((from & 0xfff) == 0xffe)
4657 /* Found a candidate. Note we haven't checked the
4658 destination is within 4K here: if we do so (and
4659 don't create an entry in a8_relocs) we can't tell
4660 that a branch should have been relocated when scanning later. */
4662 if (num_a8_relocs == a8_reloc_table_size)
4664 a8_reloc_table_size *= 2;
4665 a8_relocs = (struct a8_erratum_reloc *)
4666 bfd_realloc (a8_relocs,
4667 sizeof (struct a8_erratum_reloc)
4668 * a8_reloc_table_size);
4671 a8_relocs[num_a8_relocs].from = from;
4672 a8_relocs[num_a8_relocs].destination = destination;
4673 a8_relocs[num_a8_relocs].r_type = r_type;
4674 a8_relocs[num_a8_relocs].st_type = st_type;
4675 a8_relocs[num_a8_relocs].sym_name = sym_name;
4676 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4677 a8_relocs[num_a8_relocs].hash = hash;
4684 /* We're done with the internal relocs, free them. */
4685 if (elf_section_data (section)->relocs == NULL)
4686 free (internal_relocs);
4689 if (htab->fix_cortex_a8)
4691 /* Sort relocs which might apply to Cortex-A8 erratum. */
4692 qsort (a8_relocs, num_a8_relocs,
4693 sizeof (struct a8_erratum_reloc),
4696 /* Scan for branches which might trigger Cortex-A8 erratum. */
4697 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4698 &num_a8_fixes, &a8_fix_table_size,
4699 a8_relocs, num_a8_relocs,
4700 prev_num_a8_fixes, &stub_changed)
4702 goto error_ret_free_local;
4706 if (prev_num_a8_fixes != num_a8_fixes)
4707 stub_changed = TRUE;
4712 /* OK, we've added some stubs. Find out the new size of the stub sections. */
4714 for (stub_sec = htab->stub_bfd->sections;
4716 stub_sec = stub_sec->next)
4718 /* Ignore non-stub sections. */
4719 if (!strstr (stub_sec->name, STUB_SUFFIX))
4725 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4727 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4728 if (htab->fix_cortex_a8)
4729 for (i = 0; i < num_a8_fixes; i++)
4731 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4732 a8_fixes[i].section, htab);
4734 if (stub_sec == NULL)
4735 goto error_ret_free_local;
4738 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4743 /* Ask the linker to do its stuff. */
4744 (*htab->layout_sections_again) ();
4747 /* Add stubs for Cortex-A8 erratum fixes now. */
4748 if (htab->fix_cortex_a8)
4750 for (i = 0; i < num_a8_fixes; i++)
4752 struct elf32_arm_stub_hash_entry *stub_entry;
4753 char *stub_name = a8_fixes[i].stub_name;
4754 asection *section = a8_fixes[i].section;
4755 unsigned int section_id = a8_fixes[i].section->id;
4756 asection *link_sec = htab->stub_group[section_id].link_sec;
4757 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4758 const insn_sequence *template_sequence;
4759 int template_size, size = 0;
4761 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4763 if (stub_entry == NULL)
4765 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4771 stub_entry->stub_sec = stub_sec;
4772 stub_entry->stub_offset = 0;
4773 stub_entry->id_sec = link_sec;
4774 stub_entry->stub_type = a8_fixes[i].stub_type;
4775 stub_entry->target_section = a8_fixes[i].section;
4776 stub_entry->target_value = a8_fixes[i].offset;
4777 stub_entry->target_addend = a8_fixes[i].addend;
4778 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4779 stub_entry->st_type = a8_fixes[i].st_type;
4781 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4785 stub_entry->stub_size = size;
4786 stub_entry->stub_template = template_sequence;
4787 stub_entry->stub_template_size = template_size;
4790 /* Stash the Cortex-A8 erratum fix array for use later in
4791 elf32_arm_write_section(). */
4792 htab->a8_erratum_fixes = a8_fixes;
4793 htab->num_a8_erratum_fixes = num_a8_fixes;
4797 htab->a8_erratum_fixes = NULL;
4798 htab->num_a8_erratum_fixes = 0;
4802 error_ret_free_local:
4806 /* Build all the stubs associated with the current output file. The
4807 stubs are kept in a hash table attached to the main linker hash
4808 table. We also set up the .plt entries for statically linked PIC
4809 functions here. This function is called via arm_elf_finish in the linker. */
4813 elf32_arm_build_stubs (struct bfd_link_info *info)
4816 struct bfd_hash_table *table;
4817 struct elf32_arm_link_hash_table *htab;
4819 htab = elf32_arm_hash_table (info);
4823 for (stub_sec = htab->stub_bfd->sections;
4825 stub_sec = stub_sec->next)
4829 /* Ignore non-stub sections. */
4830 if (!strstr (stub_sec->name, STUB_SUFFIX))
4833 /* Allocate memory to hold the linker stubs. */
4834 size = stub_sec->size;
4835 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4836 if (stub_sec->contents == NULL && size != 0)
4841 /* Build the stubs as directed by the stub hash table. */
4842 table = &htab->stub_hash_table;
4843 bfd_hash_traverse (table, arm_build_one_stub, info);
4844 if (htab->fix_cortex_a8)
4846 /* Place the Cortex-A8 stubs last. */
4847 htab->fix_cortex_a8 = -1;
4848 bfd_hash_traverse (table, arm_build_one_stub, info);
4854 /* Locate the Thumb encoded calling stub for NAME. */
4856 static struct elf_link_hash_entry *
4857 find_thumb_glue (struct bfd_link_info *link_info,
4859 char **error_message)
4862 struct elf_link_hash_entry *hash;
4863 struct elf32_arm_link_hash_table *hash_table;
4865 /* We need a pointer to the armelf specific hash table. */
4866 hash_table = elf32_arm_hash_table (link_info);
4867 if (hash_table == NULL)
4870 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4871 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4873 BFD_ASSERT (tmp_name);
4875 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4877 hash = elf_link_hash_lookup
4878 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4881 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4882 tmp_name, name) == -1)
4883 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4890 /* Locate the ARM encoded calling stub for NAME. */
4892 static struct elf_link_hash_entry *
4893 find_arm_glue (struct bfd_link_info *link_info,
4895 char **error_message)
4898 struct elf_link_hash_entry *myh;
4899 struct elf32_arm_link_hash_table *hash_table;
4901 /* We need a pointer to the elfarm specific hash table. */
4902 hash_table = elf32_arm_hash_table (link_info);
4903 if (hash_table == NULL)
4906 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4907 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4909 BFD_ASSERT (tmp_name);
4911 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4913 myh = elf_link_hash_lookup
4914 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4917 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4918 tmp_name, name) == -1)
4919 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4926 /* ARM->Thumb glue (static images):
4930      ldr r12, __func_addr
          bx  r12
4933      .word func    @ behave as if you saw an ARM_32 reloc.
        (v5 images)
          ldr pc, __func_addr
4940      .word func    @ behave as if you saw an ARM_32 reloc.
4942    (relocatable images)
4945      ldr r12, __func_offset
          add r12, r12, pc
          bx  r12
          .word func - .   */
4951 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4952 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4953 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4954 static const insn32 a2t3_func_addr_insn = 0x00000001;
4956 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4957 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4958 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4960 #define ARM2THUMB_PIC_GLUE_SIZE 16
4961 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4962 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4963 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
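/* For reference, the glue instruction words above disassemble as:
     0xe59fc000  ldr r12, [pc, #0]   @ load __func_addr at offset 8
     0xe12fff1c  bx  r12
     0xe51ff004  ldr pc, [pc, #-4]   @ load __func_addr at offset 4
     0xe59fc004  ldr r12, [pc, #4]   @ load __func_offset at offset 12
     0xe08cc00f  add r12, r12, pc
   The a2t*_func_addr_insn values are not instructions: they hold the
   Thumb bit that is OR'd into the destination address word.  */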
4965 /* Thumb->ARM glue:
       bx  pc
       nop
       .arm
       b   func
     (a longer form exists for calls to non-interworking aware ARM code).  */
4981 #define THUMB2ARM_GLUE_SIZE 8
4982 static const insn16 t2a1_bx_pc_insn = 0x4778;
4983 static const insn16 t2a2_noop_insn = 0x46c0;
4984 static const insn32 t2a3_b_insn = 0xea000000;
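/* For reference: 0x4778 is "bx pc", 0x46c0 is "mov r8, r8" (the
   canonical Thumb NOP), and 0xea000000 is an ARM "b" with a zero
   offset into which elf32_thumb_to_arm_stub patches the real
   branch offset.  */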
4986 #define VFP11_ERRATUM_VENEER_SIZE 8
4988 #define ARM_BX_VENEER_SIZE 12
4989 static const insn32 armbx1_tst_insn = 0xe3100001;
4990 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4991 static const insn32 armbx3_bx_insn = 0xe12fff10;
4993 #ifndef ELFARM_NABI_C_INCLUDED
4995 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4998 bfd_byte * contents;
5002 /* Do not include empty glue sections in the output. */
5005 s = bfd_get_section_by_name (abfd, name);
5007 s->flags |= SEC_EXCLUDE;
5012 BFD_ASSERT (abfd != NULL);
5014 s = bfd_get_section_by_name (abfd, name);
5015 BFD_ASSERT (s != NULL);
5017 contents = (bfd_byte *) bfd_alloc (abfd, size);
5019 BFD_ASSERT (s->size == size);
5020 s->contents = contents;
5024 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5026 struct elf32_arm_link_hash_table * globals;
5028 globals = elf32_arm_hash_table (info);
5029 BFD_ASSERT (globals != NULL);
5031 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5032 globals->arm_glue_size,
5033 ARM2THUMB_GLUE_SECTION_NAME);
5035 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5036 globals->thumb_glue_size,
5037 THUMB2ARM_GLUE_SECTION_NAME);
5039 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5040 globals->vfp11_erratum_glue_size,
5041 VFP11_ERRATUM_VENEER_SECTION_NAME);
5043 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5044 globals->bx_glue_size,
5045 ARM_BX_GLUE_SECTION_NAME);
5050 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5051 Returns the symbol identifying the stub. */
5053 static struct elf_link_hash_entry *
5054 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5055 struct elf_link_hash_entry * h)
5057 const char * name = h->root.root.string;
5060 struct elf_link_hash_entry * myh;
5061 struct bfd_link_hash_entry * bh;
5062 struct elf32_arm_link_hash_table * globals;
5066 globals = elf32_arm_hash_table (link_info);
5067 BFD_ASSERT (globals != NULL);
5068 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5070 s = bfd_get_section_by_name
5071 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5073 BFD_ASSERT (s != NULL);
5075 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5076 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5078 BFD_ASSERT (tmp_name);
5080 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5082 myh = elf_link_hash_lookup
5083 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5087 /* We've already seen this guy. */
5092 /* The only trick here is using hash_table->arm_glue_size as the value.
5093 Even though the section isn't allocated yet, this is where we will be
5094 putting it. The +1 on the value marks that the stub has not been
5095 output yet - not that it is a Thumb function. */
5097 val = globals->arm_glue_size + 1;
5098 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5099 tmp_name, BSF_GLOBAL, s, val,
5100 NULL, TRUE, FALSE, &bh);
5102 myh = (struct elf_link_hash_entry *) bh;
5103 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5104 myh->forced_local = 1;
5108 if (link_info->shared || globals->root.is_relocatable_executable
5109 || globals->pic_veneer)
5110 size = ARM2THUMB_PIC_GLUE_SIZE;
5111 else if (globals->use_blx)
5112 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5114 size = ARM2THUMB_STATIC_GLUE_SIZE;
5117 globals->arm_glue_size += size;
5122 /* Allocate space for ARMv4 BX veneers. */
5125 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5128 struct elf32_arm_link_hash_table *globals;
5130 struct elf_link_hash_entry *myh;
5131 struct bfd_link_hash_entry *bh;
5134 /* BX PC does not need a veneer. */
5138 globals = elf32_arm_hash_table (link_info);
5139 BFD_ASSERT (globals != NULL);
5140 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5142 /* Check if this veneer has already been allocated. */
5143 if (globals->bx_glue_offset[reg])
5146 s = bfd_get_section_by_name
5147 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5149 BFD_ASSERT (s != NULL);
5151 /* Add symbol for veneer. */
5153 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5155 BFD_ASSERT (tmp_name);
5157 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5159 myh = elf_link_hash_lookup
5160 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5162 BFD_ASSERT (myh == NULL);
5165 val = globals->bx_glue_size;
5166 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5167 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5168 NULL, TRUE, FALSE, &bh);
5170 myh = (struct elf_link_hash_entry *) bh;
5171 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5172 myh->forced_local = 1;
5174 s->size += ARM_BX_VENEER_SIZE;
5175 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5176 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
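/* Note on the encoding used above: bx_glue_offset[reg] keeps the veneer's
   offset in its upper bits, bit 1 records that a veneer has been allocated
   for REG, and bit 0 (set later, in elf32_arm_bx_glue) records that the
   veneer's contents have been written out.  */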
5180 /* Add an entry to the code/data map for section SEC. */
5183 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5185 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5186 unsigned int newidx;
5188 if (sec_data->map == NULL)
5190 sec_data->map = (elf32_arm_section_map *)
5191 bfd_malloc (sizeof (elf32_arm_section_map));
5192 sec_data->mapcount = 0;
5193 sec_data->mapsize = 1;
5196 newidx = sec_data->mapcount++;
5198 if (sec_data->mapcount > sec_data->mapsize)
5200 sec_data->mapsize *= 2;
5201 sec_data->map = (elf32_arm_section_map *)
5202 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5203 * sizeof (elf32_arm_section_map));
5208 sec_data->map[newidx].vma = vma;
5209 sec_data->map[newidx].type = type;
5214 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5215 veneers are handled for now. */
5218 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5219 elf32_vfp11_erratum_list *branch,
5221 asection *branch_sec,
5222 unsigned int offset)
5225 struct elf32_arm_link_hash_table *hash_table;
5227 struct elf_link_hash_entry *myh;
5228 struct bfd_link_hash_entry *bh;
5230 struct _arm_elf_section_data *sec_data;
5231 elf32_vfp11_erratum_list *newerr;
5233 hash_table = elf32_arm_hash_table (link_info);
5234 BFD_ASSERT (hash_table != NULL);
5235 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5237 s = bfd_get_section_by_name
5238 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5240 sec_data = elf32_arm_section_data (s);
5242 BFD_ASSERT (s != NULL);
5244 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5245 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5247 BFD_ASSERT (tmp_name);
5249 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5250 hash_table->num_vfp11_fixes);
5252 myh = elf_link_hash_lookup
5253 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5255 BFD_ASSERT (myh == NULL);
5258 val = hash_table->vfp11_erratum_glue_size;
5259 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5260 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5261 NULL, TRUE, FALSE, &bh);
5263 myh = (struct elf_link_hash_entry *) bh;
5264 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5265 myh->forced_local = 1;
5267 /* Link veneer back to calling location. */
5268 sec_data->erratumcount += 1;
5269 newerr = (elf32_vfp11_erratum_list *)
5270 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5272 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5274 newerr->u.v.branch = branch;
5275 newerr->u.v.id = hash_table->num_vfp11_fixes;
5276 branch->u.b.veneer = newerr;
5278 newerr->next = sec_data->erratumlist;
5279 sec_data->erratumlist = newerr;
5281 /* A symbol for the return from the veneer. */
5282 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5283 hash_table->num_vfp11_fixes);
5285 myh = elf_link_hash_lookup
5286 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5293 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5294 branch_sec, val, NULL, TRUE, FALSE, &bh);
5296 myh = (struct elf_link_hash_entry *) bh;
5297 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5298 myh->forced_local = 1;
5302 /* Generate a mapping symbol for the veneer section, and explicitly add an
5303 entry for that symbol to the code/data map for the section. */
5304 if (hash_table->vfp11_erratum_glue_size == 0)
5307 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5308 ever requires this erratum fix. */
5309 _bfd_generic_link_add_one_symbol (link_info,
5310 hash_table->bfd_of_glue_owner, "$a",
5311 BSF_LOCAL, s, 0, NULL,
5314 myh = (struct elf_link_hash_entry *) bh;
5315 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5316 myh->forced_local = 1;
5318 /* The elf32_arm_init_maps function only cares about symbols from input
5319 BFDs. We must make a note of this generated mapping symbol
5320 ourselves so that code byteswapping works properly in
5321 elf32_arm_write_section. */
5322 elf32_arm_section_map_add (s, 'a', 0);
5325 s->size += VFP11_ERRATUM_VENEER_SIZE;
5326 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5327 hash_table->num_vfp11_fixes++;
5329 /* The offset of the veneer. */
5333 #define ARM_GLUE_SECTION_FLAGS \
5334 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5335 | SEC_READONLY | SEC_LINKER_CREATED)
5337 /* Create a fake section for use by the ARM backend of the linker. */
5340 arm_make_glue_section (bfd * abfd, const char * name)
5344 sec = bfd_get_section_by_name (abfd, name);
5349 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5352 || !bfd_set_section_alignment (abfd, sec, 2))
5355 /* Set the gc mark to prevent the section from being removed by garbage
5356 collection, despite the fact that no relocs refer to this section. */
5362 /* Add the glue sections to ABFD. This function is called from the
5363 linker scripts in ld/emultempl/{armelf}.em. */
5366 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5367 struct bfd_link_info *info)
5369 /* If we are only performing a partial
5370 link do not bother adding the glue. */
5371 if (info->relocatable)
5374 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5375 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5376 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5377 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5380 /* Select a BFD to be used to hold the sections used by the glue code.
5381 This function is called from the linker scripts in ld/emultempl/{armelf}.em. */
5385 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5387 struct elf32_arm_link_hash_table *globals;
5389 /* If we are only performing a partial link
5390 do not bother getting a bfd to hold the glue. */
5391 if (info->relocatable)
5394 /* Make sure we don't attach the glue sections to a dynamic object. */
5395 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5397 globals = elf32_arm_hash_table (info);
5398 BFD_ASSERT (globals != NULL);
5400 if (globals->bfd_of_glue_owner != NULL)
5403 /* Save the bfd for later use. */
5404 globals->bfd_of_glue_owner = abfd;
5410 check_use_blx (struct elf32_arm_link_hash_table *globals)
5412 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5414 globals->use_blx = 1;
5418 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5419 struct bfd_link_info *link_info)
5421 Elf_Internal_Shdr *symtab_hdr;
5422 Elf_Internal_Rela *internal_relocs = NULL;
5423 Elf_Internal_Rela *irel, *irelend;
5424 bfd_byte *contents = NULL;
5427 struct elf32_arm_link_hash_table *globals;
5429 /* If we are only performing a partial link do not bother
5430 to construct any glue. */
5431 if (link_info->relocatable)
5434 /* Here we have a bfd that is to be included on the link. We have a
5435 hook to do reloc rummaging, before section sizes are nailed down. */
5436 globals = elf32_arm_hash_table (link_info);
5437 BFD_ASSERT (globals != NULL);
5439 check_use_blx (globals);
5441 if (globals->byteswap_code && !bfd_big_endian (abfd))
5443 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5448 /* PR 5398: If we have not decided to include any loadable sections in
5449 the output then we will not have a glue owner bfd. This is OK, it
5450 just means that there is nothing else for us to do here. */
5451 if (globals->bfd_of_glue_owner == NULL)
5454 /* Rummage around all the relocs and map the glue vectors. */
5455 sec = abfd->sections;
5460 for (; sec != NULL; sec = sec->next)
5462 if (sec->reloc_count == 0)
5465 if ((sec->flags & SEC_EXCLUDE) != 0)
5468 symtab_hdr = & elf_symtab_hdr (abfd);
5470 /* Load the relocs. */
5472 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5474 if (internal_relocs == NULL)
5477 irelend = internal_relocs + sec->reloc_count;
5478 for (irel = internal_relocs; irel < irelend; irel++)
5481 unsigned long r_index;
5483 struct elf_link_hash_entry *h;
5485 r_type = ELF32_R_TYPE (irel->r_info);
5486 r_index = ELF32_R_SYM (irel->r_info);
5488 /* These are the only relocation types we care about. */
5489 if ( r_type != R_ARM_PC24
5490 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5493 /* Get the section contents if we haven't done so already. */
5494 if (contents == NULL)
5496 /* Get cached copy if it exists. */
5497 if (elf_section_data (sec)->this_hdr.contents != NULL)
5498 contents = elf_section_data (sec)->this_hdr.contents;
5501 /* Go get them off disk. */
5502 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5507 if (r_type == R_ARM_V4BX)
5511 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5512 record_arm_bx_glue (link_info, reg);
5516 /* If the relocation is not against a symbol it cannot concern us. */
5519 /* We don't care about local symbols. */
5520 if (r_index < symtab_hdr->sh_info)
5523 /* This is an external symbol. */
5524 r_index -= symtab_hdr->sh_info;
5525 h = (struct elf_link_hash_entry *)
5526 elf_sym_hashes (abfd)[r_index];
5528 /* If the relocation is against a static symbol it must be within
5529 the current section and so cannot be a cross ARM/Thumb relocation. */
5533 /* If the call will go through a PLT entry then we do not need glue. */
5535 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
5541 /* This one is a call from arm code. We need to look up
5542 the target of the call. If it is a thumb target, we insert glue. */
5544 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5545 record_arm_to_thumb_glue (link_info, h);
5553 if (contents != NULL
5554 && elf_section_data (sec)->this_hdr.contents != contents)
5558 if (internal_relocs != NULL
5559 && elf_section_data (sec)->relocs != internal_relocs)
5560 free (internal_relocs);
5561 internal_relocs = NULL;
5567 if (contents != NULL
5568 && elf_section_data (sec)->this_hdr.contents != contents)
5570 if (internal_relocs != NULL
5571 && elf_section_data (sec)->relocs != internal_relocs)
5572 free (internal_relocs);
5579 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5582 bfd_elf32_arm_init_maps (bfd *abfd)
5584 Elf_Internal_Sym *isymbuf;
5585 Elf_Internal_Shdr *hdr;
5586 unsigned int i, localsyms;
5588 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5589 if (! is_arm_elf (abfd))
5592 if ((abfd->flags & DYNAMIC) != 0)
5595 hdr = & elf_symtab_hdr (abfd);
5596 localsyms = hdr->sh_info;
5598 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5599 should contain the number of local symbols, which should come before any
5600 global symbols. Mapping symbols are always local. */
5601 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5604 /* No internal symbols read? Skip this BFD. */
5605 if (isymbuf == NULL)
5608 for (i = 0; i < localsyms; i++)
5610 Elf_Internal_Sym *isym = &isymbuf[i];
5611 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5615 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5617 name = bfd_elf_string_from_elf_section (abfd,
5618 hdr->sh_link, isym->st_name);
5620 if (bfd_is_arm_special_symbol_name (name,
5621 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5622 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5628 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5629 say what they wanted. */
5632 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5634 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5635 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5637 if (globals == NULL)
5640 if (globals->fix_cortex_a8 == -1)
5642 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5643 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5644 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5645 || out_attr[Tag_CPU_arch_profile].i == 0))
5646 globals->fix_cortex_a8 = 1;
5648 globals->fix_cortex_a8 = 0;
5654 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5656 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5657 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5659 if (globals == NULL)
5661 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5662 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5664 switch (globals->vfp11_fix)
5666 case BFD_ARM_VFP11_FIX_DEFAULT:
5667 case BFD_ARM_VFP11_FIX_NONE:
5668 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5672 /* Give a warning, but do as the user requests anyway. */
5673 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5674 "workaround is not necessary for target architecture"), obfd);
5677 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5678 /* For earlier architectures, we might need the workaround, but do not
5679 enable it by default. If the user is running with broken hardware, they
5680 must enable the erratum fix explicitly. */
5681 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5685 enum bfd_arm_vfp11_pipe
5693 /* Return a VFP register number. This is encoded as RX:X for single-precision
5694 registers, or X:RX for double-precision registers, where RX is the group of
5695 four bits in the instruction encoding and X is the single extension bit.
5696 RX and X fields are specified using their lowest (starting) bit. The return value is:
5699 0...31: single-precision registers s0...s31
5700 32...63: double-precision registers d0...d31.
5702 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5703 encounter VFP3 instructions, so we allow the full range for DP registers. */
5706 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5710 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5712 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
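/* For example, an RX field of 0b0011 with X = 1 yields 7 (s7) for a
   single-precision operand, and 19 + 32 = 51 (d19) for a
   double-precision one.  */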
5715 /* Set bits in *WMASK according to a register number REG as encoded by
5716 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5719 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5724 *wmask |= 3 << ((reg - 32) * 2);
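/* So the mask uses bit N for sN and a pair of bits per DP register:
   a write to d3, for instance, sets bits 6 and 7, which is what the
   antidependency check below tests against reads of s6 or s7.  */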
5727 /* Return TRUE if WMASK overwrites anything in REGS. */
5730 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5734 for (i = 0; i < numregs; i++)
5736 unsigned int reg = regs[i];
5738 if (reg < 32 && (wmask & (1 << reg)) != 0)
5746 if ((wmask & (3 << (reg * 2))) != 0)
5753 /* In this function, we're interested in two things: finding input registers
5754 for VFP data-processing instructions, and finding the set of registers which
5755 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5756 hold the written set, so FLDM etc. are easy to deal with (we're only
5757 interested in 32 SP registers or 16 DP registers, due to the VFP version
5758 implemented by the chip in question). DP registers are marked by setting
5759 both SP registers in the write mask. */
5761 static enum bfd_arm_vfp11_pipe
5762 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5765 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
5766 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5768 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5771 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5772 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5774 pqrs = ((insn & 0x00800000) >> 20)
5775 | ((insn & 0x00300000) >> 19)
5776 | ((insn & 0x00000040) >> 6);
5780 case 0: /* fmac[sd]. */
5781 case 1: /* fnmac[sd]. */
5782 case 2: /* fmsc[sd]. */
5783 case 3: /* fnmsc[sd]. */
5785 bfd_arm_vfp11_write_mask (destmask, fd);
5787 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5792 case 4: /* fmul[sd]. */
5793 case 5: /* fnmul[sd]. */
5794 case 6: /* fadd[sd]. */
5795 case 7: /* fsub[sd]. */
5799 case 8: /* fdiv[sd]. */
5802 bfd_arm_vfp11_write_mask (destmask, fd);
5803 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5808 case 15: /* extended opcode. */
5810 unsigned int extn = ((insn >> 15) & 0x1e)
5811 | ((insn >> 7) & 1);
5815 case 0: /* fcpy[sd]. */
5816 case 1: /* fabs[sd]. */
5817 case 2: /* fneg[sd]. */
5818 case 8: /* fcmp[sd]. */
5819 case 9: /* fcmpe[sd]. */
5820 case 10: /* fcmpz[sd]. */
5821 case 11: /* fcmpez[sd]. */
5822 case 16: /* fuito[sd]. */
5823 case 17: /* fsito[sd]. */
5824 case 24: /* ftoui[sd]. */
5825 case 25: /* ftouiz[sd]. */
5826 case 26: /* ftosi[sd]. */
5827 case 27: /* ftosiz[sd]. */
5828 /* These instructions will not bounce due to underflow. */
5833 case 3: /* fsqrt[sd]. */
5834 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5835 registers to cause the erratum in previous instructions. */
5836 bfd_arm_vfp11_write_mask (destmask, fd);
5840 case 15: /* fcvt{ds,sd}. */
5844 bfd_arm_vfp11_write_mask (destmask, fd);
5846 /* Only FCVTSD can underflow. */
5847 if ((insn & 0x100) != 0)
5866 /* Two-register transfer. */
5867 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5869 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5871 if ((insn & 0x100000) == 0)
5874 bfd_arm_vfp11_write_mask (destmask, fm);
5877 bfd_arm_vfp11_write_mask (destmask, fm);
5878 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5884 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5886 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5887 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5891 case 0: /* Two-reg transfer. We should catch these above. */
5894 case 2: /* fldm[sdx]. */
5898 unsigned int i, offset = insn & 0xff;
5903 for (i = fd; i < fd + offset; i++)
5904 bfd_arm_vfp11_write_mask (destmask, i);
5908 case 4: /* fld[sd]. */
5910 bfd_arm_vfp11_write_mask (destmask, fd);
5919 /* Single-register transfer. Note L==0. */
5920 else if ((insn & 0x0f100e10) == 0x0e000a10)
5922 unsigned int opcode = (insn >> 21) & 7;
5923 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5927 case 0: /* fmsr/fmdlr. */
5928 case 1: /* fmdhr. */
5929 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5930 destination register. I don't know if this is exactly right,
5931 but it is the conservative choice. */
5932 bfd_arm_vfp11_write_mask (destmask, fn);
5946 static int elf32_arm_compare_mapping (const void * a, const void * b);
5949 /* Look for potentially troublesome code sequences which might trigger the
5950 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5951 (available from ARM) for details of the erratum. A short version is
5952 described in ld.texinfo. */
5955 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5958 bfd_byte *contents = NULL;
5960 int regs[3], numregs = 0;
5961 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5962 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5964 if (globals == NULL)
5967 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5968 The states transition as follows:
5970 0 -> 1 (vector) or 0 -> 2 (scalar)
5971 A VFP FMAC-pipeline instruction has been seen. Fill
5972 regs[0]..regs[numregs-1] with its input operands. Remember this
5973 instruction in 'first_fmac'.

1 -> 2
5976 Any instruction, except for a VFP instruction which overwrites
regs[*].

1 -> 3 or 2 -> 3
5981 A VFP instruction has been seen which overwrites any of regs[*].
5982 We must make a veneer! Reset state to 0 before examining next
instruction.

2 -> 0
5986 If we fail to match anything in state 2, reset to state 0 and reset
5987 the instruction pointer to the instruction after 'first_fmac'.
5989 If the VFP11 vector mode is in use, there must be at least two unrelated
5990 instructions between anti-dependent VFP11 instructions to properly avoid
5991 triggering the erratum, hence the use of the extra state 1. */
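/* A minimal example of the pattern: "fmacs s0, s1, s2" (an FMAC-pipeline
   instruction, state 0 -> 2 in scalar mode) followed by any VFP
   instruction that writes s0, s1 or s2 moves to state 3 and forces a
   veneer to be recorded.  */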
5993 /* If we are only performing a partial link do not bother
5994 to construct any glue. */
5995 if (link_info->relocatable)
5998 /* Skip if this bfd does not correspond to an ELF image. */
5999 if (! is_arm_elf (abfd))
6002 /* We should have chosen a fix type by the time we get here. */
6003 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6005 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6008 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6009 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6012 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6014 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6015 struct _arm_elf_section_data *sec_data;
6017 /* If we don't have executable progbits, we're not interested in this
6018 section. Also skip if section is to be excluded. */
6019 if (elf_section_type (sec) != SHT_PROGBITS
6020 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6021 || (sec->flags & SEC_EXCLUDE) != 0
6022 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
6023 || sec->output_section == bfd_abs_section_ptr
6024 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6027 sec_data = elf32_arm_section_data (sec);
6029 if (sec_data->mapcount == 0)
6032 if (elf_section_data (sec)->this_hdr.contents != NULL)
6033 contents = elf_section_data (sec)->this_hdr.contents;
6034 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6037 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6038 elf32_arm_compare_mapping);
6040 for (span = 0; span < sec_data->mapcount; span++)
6042 unsigned int span_start = sec_data->map[span].vma;
6043 unsigned int span_end = (span == sec_data->mapcount - 1)
6044 ? sec->size : sec_data->map[span + 1].vma;
6045 char span_type = sec_data->map[span].type;
6047 /* FIXME: Only ARM mode is supported at present. We may need to
6048 support Thumb-2 mode also at some point. */
6049 if (span_type != 'a')
6052 for (i = span_start; i < span_end;)
6054 unsigned int next_i = i + 4;
6055 unsigned int insn = bfd_big_endian (abfd)
6056 ? (contents[i] << 24)
6057 | (contents[i + 1] << 16)
6058 | (contents[i + 2] << 8)
6060 : (contents[i + 3] << 24)
6061 | (contents[i + 2] << 16)
6062 | (contents[i + 1] << 8)
6064 unsigned int writemask = 0;
6065 enum bfd_arm_vfp11_pipe vpipe;
6070 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6072 /* I'm assuming the VFP11 erratum can trigger with denorm
6073 operands on either the FMAC or the DS pipeline. This might
6074 lead to slightly overenthusiastic veneer insertion. */
6075 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6077 state = use_vector ? 1 : 2;
6079 veneer_of_insn = insn;
6085 int other_regs[3], other_numregs;
6086 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6089 if (vpipe != VFP11_BAD
6090 && bfd_arm_vfp11_antidependency (writemask, regs,
6100 int other_regs[3], other_numregs;
6101 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6104 if (vpipe != VFP11_BAD
6105 && bfd_arm_vfp11_antidependency (writemask, regs,
6111 next_i = first_fmac + 4;
6117 abort (); /* Should be unreachable. */
6122 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
6123 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6125 elf32_arm_section_data (sec)->erratumcount += 1;
6127 newerr->u.b.vfp_insn = veneer_of_insn;
6132 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6139 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6144 newerr->next = sec_data->erratumlist;
6145 sec_data->erratumlist = newerr;
6154 if (contents != NULL
6155 && elf_section_data (sec)->this_hdr.contents != contents)
6163 if (contents != NULL
6164 && elf_section_data (sec)->this_hdr.contents != contents)
6170 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6171 after sections have been laid out, using specially-named symbols. */
6174 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6175 struct bfd_link_info *link_info)
6178 struct elf32_arm_link_hash_table *globals;
6181 if (link_info->relocatable)
6184 /* Skip if this bfd does not correspond to an ELF image. */
6185 if (! is_arm_elf (abfd))
6188 globals = elf32_arm_hash_table (link_info);
6189 if (globals == NULL)
6192 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6193 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6195 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6197 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6198 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6200 for (; errnode != NULL; errnode = errnode->next)
6202 struct elf_link_hash_entry *myh;
6205 switch (errnode->type)
6207 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6208 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6209 /* Find veneer symbol. */
6210 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6211 errnode->u.b.veneer->u.v.id);
6213 myh = elf_link_hash_lookup
6214 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6217 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6218 "`%s'"), abfd, tmp_name);
6220 vma = myh->root.u.def.section->output_section->vma
6221 + myh->root.u.def.section->output_offset
6222 + myh->root.u.def.value;
6224 errnode->u.b.veneer->vma = vma;
6227 case VFP11_ERRATUM_ARM_VENEER:
6228 case VFP11_ERRATUM_THUMB_VENEER:
6229 /* Find return location. */
6230 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6233 myh = elf_link_hash_lookup
6234 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6237 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6238 "`%s'"), abfd, tmp_name);
6240 vma = myh->root.u.def.section->output_section->vma
6241 + myh->root.u.def.section->output_offset
6242 + myh->root.u.def.value;
6244 errnode->u.v.branch->vma = vma;
6257 /* Set target relocation values needed during linking. */
6260 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6261 struct bfd_link_info *link_info,
6263 char * target2_type,
6266 bfd_arm_vfp11_fix vfp11_fix,
6267 int no_enum_warn, int no_wchar_warn,
6268 int pic_veneer, int fix_cortex_a8)
6270 struct elf32_arm_link_hash_table *globals;
6272 globals = elf32_arm_hash_table (link_info);
6273 if (globals == NULL)
6276 globals->target1_is_rel = target1_is_rel;
6277 if (strcmp (target2_type, "rel") == 0)
6278 globals->target2_reloc = R_ARM_REL32;
6279 else if (strcmp (target2_type, "abs") == 0)
6280 globals->target2_reloc = R_ARM_ABS32;
6281 else if (strcmp (target2_type, "got-rel") == 0)
6282 globals->target2_reloc = R_ARM_GOT_PREL;
6285 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6288 globals->fix_v4bx = fix_v4bx;
6289 globals->use_blx |= use_blx;
6290 globals->vfp11_fix = vfp11_fix;
6291 globals->pic_veneer = pic_veneer;
6292 globals->fix_cortex_a8 = fix_cortex_a8;
6294 BFD_ASSERT (is_arm_elf (output_bfd));
6295 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6296 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6299 /* Replace the target offset of a Thumb bl or b.w instruction. */
6302 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6308 BFD_ASSERT ((offset & 1) == 0);
6310 upper = bfd_get_16 (abfd, insn);
6311 lower = bfd_get_16 (abfd, insn + 2);
6312 reloc_sign = (offset < 0) ? 1 : 0;
6313 upper = (upper & ~(bfd_vma) 0x7ff)
6314 | ((offset >> 12) & 0x3ff)
6315 | (reloc_sign << 10);
6316 lower = (lower & ~(bfd_vma) 0x2fff)
6317 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6318 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6319 | ((offset >> 1) & 0x7ff);
6320 bfd_put_16 (abfd, upper, insn);
6321 bfd_put_16 (abfd, lower, insn + 2);
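/* The fields written above follow the 32-bit Thumb branch encoding: the
   first halfword carries S (bit 10) and imm10, the second carries J1
   (bit 13), J2 (bit 11) and imm11, and the offset is
   SignExtend(S:I1:I2:imm10:imm11:'0') with I1 = NOT(J1 EOR S) and
   I2 = NOT(J2 EOR S) - hence the XORs with reloc_sign above.  */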
6324 /* Thumb code calling an ARM function. */
6327 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6331 asection * input_section,
6332 bfd_byte * hit_data,
6335 bfd_signed_vma addend,
6337 char **error_message)
6341 long int ret_offset;
6342 struct elf_link_hash_entry * myh;
6343 struct elf32_arm_link_hash_table * globals;
6345 myh = find_thumb_glue (info, name, error_message);
6349 globals = elf32_arm_hash_table (info);
6350 BFD_ASSERT (globals != NULL);
6351 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6353 my_offset = myh->root.u.def.value;
6355 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6356 THUMB2ARM_GLUE_SECTION_NAME);
6358 BFD_ASSERT (s != NULL);
6359 BFD_ASSERT (s->contents != NULL);
6360 BFD_ASSERT (s->output_section != NULL);
6362 if ((my_offset & 0x01) == 0x01)
6365 && sym_sec->owner != NULL
6366 && !INTERWORK_FLAG (sym_sec->owner))
6368 (*_bfd_error_handler)
6369 (_("%B(%s): warning: interworking not enabled.\n"
6370 " first occurrence: %B: thumb call to arm"),
6371 sym_sec->owner, input_bfd, name);
6377 myh->root.u.def.value = my_offset;
6379 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6380 s->contents + my_offset);
6382 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6383 s->contents + my_offset + 2);
6386 /* Address of destination of the stub. */
6387 ((bfd_signed_vma) val)
6389 /* Offset from the start of the current section
6390 to the start of the stubs. */
6392 /* Offset of the start of this stub from the start of the stubs. */
6394 /* Address of the start of the current section. */
6395 + s->output_section->vma)
6396 /* The branch instruction is 4 bytes into the stub. */
6398 /* ARM branches work from the pc of the instruction + 8. */
6401 put_arm_insn (globals, output_bfd,
6402 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6403 s->contents + my_offset + 4);
6406 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6408 /* Now go back and fix up the original BL insn to point to here. */
6410 /* Address of where the stub is located. */
6411 (s->output_section->vma + s->output_offset + my_offset)
6412 /* Address of where the BL is located. */
6413 - (input_section->output_section->vma + input_section->output_offset
6415 /* Addend in the relocation. */
6417 /* Biasing for PC-relative addressing. */
6420 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6425 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6427 static struct elf_link_hash_entry *
6428 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6435 char ** error_message)
6438 long int ret_offset;
6439 struct elf_link_hash_entry * myh;
6440 struct elf32_arm_link_hash_table * globals;
6442 myh = find_arm_glue (info, name, error_message);
6446 globals = elf32_arm_hash_table (info);
6447 BFD_ASSERT (globals != NULL);
6448 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6450 my_offset = myh->root.u.def.value;
6452 if ((my_offset & 0x01) == 0x01)
6455 && sym_sec->owner != NULL
6456 && !INTERWORK_FLAG (sym_sec->owner))
6458 (*_bfd_error_handler)
6459 (_("%B(%s): warning: interworking not enabled.\n"
6460 " first occurrence: %B: arm call to thumb"),
6461 sym_sec->owner, input_bfd, name);
6465 myh->root.u.def.value = my_offset;
6467 if (info->shared || globals->root.is_relocatable_executable
6468 || globals->pic_veneer)
6470 /* For relocatable objects we can't use absolute addresses,
6471 so construct the address from a relative offset. */
6472 /* TODO: If the offset is small it's probably worth
6473 constructing the address with adds. */
6474 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6475 s->contents + my_offset);
6476 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6477 s->contents + my_offset + 4);
6478 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6479 s->contents + my_offset + 8);
6480 /* Adjust the offset by 4 for the position of the add,
6481 and 8 for the pipeline offset. */
6482 ret_offset = (val - (s->output_offset
6483 + s->output_section->vma
6486 bfd_put_32 (output_bfd, ret_offset,
6487 s->contents + my_offset + 12);
6489 else if (globals->use_blx)
6491 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6492 s->contents + my_offset);
6494 /* It's a thumb address. Add the low order bit. */
6495 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6496 s->contents + my_offset + 4);
6500 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6501 s->contents + my_offset);
6503 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6504 s->contents + my_offset + 4);
6506 /* It's a thumb address. Add the low order bit. */
6507 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6508 s->contents + my_offset + 8);
6514 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6519 /* Arm code calling a Thumb function. */
6522 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6526 asection * input_section,
6527 bfd_byte * hit_data,
6530 bfd_signed_vma addend,
6532 char **error_message)
6534 unsigned long int tmp;
6537 long int ret_offset;
6538 struct elf_link_hash_entry * myh;
6539 struct elf32_arm_link_hash_table * globals;
6541 globals = elf32_arm_hash_table (info);
6542 BFD_ASSERT (globals != NULL);
6543 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6545 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6546 ARM2THUMB_GLUE_SECTION_NAME);
6547 BFD_ASSERT (s != NULL);
6548 BFD_ASSERT (s->contents != NULL);
6549 BFD_ASSERT (s->output_section != NULL);
6551 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6552 sym_sec, val, s, error_message);
6556 my_offset = myh->root.u.def.value;
6557 tmp = bfd_get_32 (input_bfd, hit_data);
6558 tmp = tmp & 0xFF000000;
6560 /* ARM branch offsets are relative to the address of the branch plus 8
(the prefetch offset), so subtract 8 here. */
6561 ret_offset = (s->output_offset
6563 + s->output_section->vma
6564 - (input_section->output_offset
6565 + input_section->output_section->vma
6569 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6571 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
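/* Note on the patch just made: only the top byte of the original BL (its
   condition and opcode bits) is kept, and the low 24 bits are replaced by
   the word offset from the BL to the stub, i.e. (stub - BL - 8) >> 2,
   where the -8 accounts for ARM's PC read-ahead.  */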
6576 /* Populate Arm stub for an exported Thumb function. */
6579 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6581 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6583 struct elf_link_hash_entry * myh;
6584 struct elf32_arm_link_hash_entry *eh;
6585 struct elf32_arm_link_hash_table * globals;
6588 char *error_message;
6590 eh = elf32_arm_hash_entry (h);
6591 /* Allocate stubs for exported Thumb functions on v4t. */
6592 if (eh->export_glue == NULL)
6595 globals = elf32_arm_hash_table (info);
6596 BFD_ASSERT (globals != NULL);
6597 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6599 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6600 ARM2THUMB_GLUE_SECTION_NAME);
6601 BFD_ASSERT (s != NULL);
6602 BFD_ASSERT (s->contents != NULL);
6603 BFD_ASSERT (s->output_section != NULL);
6605 sec = eh->export_glue->root.u.def.section;
6607 BFD_ASSERT (sec->output_section != NULL);
6609 val = eh->export_glue->root.u.def.value + sec->output_offset
6610 + sec->output_section->vma;
6612 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6613 h->root.u.def.section->owner,
6614 globals->obfd, sec, val, s,
6620 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6623 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6628 struct elf32_arm_link_hash_table *globals;
6630 globals = elf32_arm_hash_table (info);
6631 BFD_ASSERT (globals != NULL);
6632 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6634 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6635 ARM_BX_GLUE_SECTION_NAME);
6636 BFD_ASSERT (s != NULL);
6637 BFD_ASSERT (s->contents != NULL);
6638 BFD_ASSERT (s->output_section != NULL);
6640 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6642 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6644 if ((globals->bx_glue_offset[reg] & 1) == 0)
6646 p = s->contents + glue_addr;
6647 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6648 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6649 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6650 globals->bx_glue_offset[reg] |= 1;
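/* Judging from the armbx* macro names, the veneer written above is roughly:
     tst rN, #1 ; moveq pc, rN ; bx rN
   so a plain mov to pc is used when the target address has its Thumb bit
   clear; the bx is only reached for Thumb targets, which an ARMv4 core
   could not execute anyway.  */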
6653 return glue_addr + s->output_section->vma + s->output_offset;
6656 /* Generate Arm stubs for exported Thumb symbols. */
6658 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6659 struct bfd_link_info *link_info)
6661 struct elf32_arm_link_hash_table * globals;
6663 if (link_info == NULL)
6664 /* Ignore this if we are not called by the ELF backend linker. */
6667 globals = elf32_arm_hash_table (link_info);
6668 if (globals == NULL)
6671 /* If blx is available then exported Thumb symbols are OK and there is
nothing to do. */
6673 if (globals->use_blx)
6676 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6680 /* Some relocations map to different relocations depending on the
6681 target. Return the real relocation. */
6684 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6690 if (globals->target1_is_rel)
6696 return globals->target2_reloc;
6703 /* Return the base VMA address which should be subtracted from real addresses
6704 when resolving @dtpoff relocation.
6705 This is PT_TLS segment p_vaddr. */
6708 dtpoff_base (struct bfd_link_info *info)
6710 /* If tls_sec is NULL, we should have signalled an error already. */
6711 if (elf_hash_table (info)->tls_sec == NULL)
6713 return elf_hash_table (info)->tls_sec->vma;
6716 /* Return the relocation value for @tpoff relocation
6717 if STT_TLS virtual address is ADDRESS. */
6720 tpoff (struct bfd_link_info *info, bfd_vma address)
6722 struct elf_link_hash_table *htab = elf_hash_table (info);
6725 /* If tls_sec is NULL, we should have signalled an error already. */
6726 if (htab->tls_sec == NULL)
6728 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6729 return address - htab->tls_sec->vma + base;
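/* ARM uses TLS variant 1: the thread pointer addresses the TCB, and the
   executable's TLS block starts TCB_SIZE bytes later, rounded up to the
   alignment of the TLS segment, hence the addition of BASE above.  */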
6732 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6733 VALUE is the relocation value. */
6735 static bfd_reloc_status_type
6736 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6739 return bfd_reloc_overflow;
6741 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6742 bfd_put_32 (abfd, value, data);
6743 return bfd_reloc_ok;
6746 /* For a given value of n, calculate the value of G_n as required to
6747 deal with group relocations. We return it in the form of an
6748 encoded constant-and-rotation, together with the final residual. If n is
6749 specified as less than zero, then final_residual is filled with the
6750 input value and no further action is performed. */
6753 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6757 bfd_vma encoded_g_n = 0;
6758 bfd_vma residual = value; /* Also known as Y_n. */
6760 for (current_n = 0; current_n <= n; current_n++)
6764 /* Calculate which part of the value to mask. */
6771 /* Determine the most significant bit in the residual and
6772 align the resulting value to a 2-bit boundary. */
6773 for (msb = 30; msb >= 0; msb -= 2)
6774 if (residual & (3 << msb))
6777 /* The desired shift is now (msb - 6), or zero, whichever is the greater. */
6784 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6785 g_n = residual & (0xff << shift);
6786 encoded_g_n = (g_n >> shift)
6787 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6789 /* Calculate the residual for the next time around. */
6793 *final_residual = residual;
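/* Illustrative example (assuming the residual is reduced by each G_n as
   described above): splitting the value 0x1234 gives G_0 = 0x1200, encoded
   as constant 0x48 with rotation field 13 (i.e. 0xd48), and G_1 = 0x34,
   leaving a final residual of zero.  */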
6798 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6799 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6802 identify_add_or_sub (bfd_vma insn)
6804 int opcode = insn & 0x1e00000;
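/* Bits 21-24 of an ARM data-processing instruction hold the opcode field:
   ADD is 0b0100 (1 << 23 after the mask above) and SUB is 0b0010 (1 << 22).  */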
6806 if (opcode == 1 << 23) /* ADD */
6809 if (opcode == 1 << 22) /* SUB */
6815 /* Perform a relocation as part of a final link. */
6817 static bfd_reloc_status_type
6818 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6821 asection * input_section,
6822 bfd_byte * contents,
6823 Elf_Internal_Rela * rel,
6825 struct bfd_link_info * info,
6827 const char * sym_name,
6829 struct elf_link_hash_entry * h,
6830 bfd_boolean * unresolved_reloc_p,
6831 char ** error_message)
6833 unsigned long r_type = howto->type;
6834 unsigned long r_symndx;
6835 bfd_byte * hit_data = contents + rel->r_offset;
6836 bfd_vma * local_got_offsets;
6837 asection * sgot = NULL;
6838 asection * splt = NULL;
6839 asection * sreloc = NULL;
6842 bfd_signed_vma signed_addend;
6843 struct elf32_arm_link_hash_table * globals;
6845 globals = elf32_arm_hash_table (info);
6846 if (globals == NULL)
6847 return bfd_reloc_notsupported;
6849 BFD_ASSERT (is_arm_elf (input_bfd));
6851 /* Some relocation types map to different relocations depending on the
6852 target. We pick the right one here. */
6853 r_type = arm_real_reloc_type (globals, r_type);
6854 if (r_type != howto->type)
6855 howto = elf32_arm_howto_from_type (r_type);
6857 /* If the start address has been set, then set the EF_ARM_HASENTRY
6858 flag. Setting this more than once is redundant, but the cost is
6859 not too high, and it keeps the code simple.
6861 The test is done here, rather than somewhere else, because the
6862 start address is only set just before the final link commences.
6864 Note - if the user deliberately sets a start address of 0, the
6865 flag will not be set. */
6866 if (bfd_get_start_address (output_bfd) != 0)
6867 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6869 sgot = globals->root.sgot;
6870 splt = globals->root.splt;
6871 srelgot = globals->root.srelgot;
6872 local_got_offsets = elf_local_got_offsets (input_bfd);
6873 r_symndx = ELF32_R_SYM (rel->r_info);
6875 if (globals->use_rel)
6877 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6879 if (addend & ((howto->src_mask + 1) >> 1))
6882 signed_addend &= ~ howto->src_mask;
6883 signed_addend |= addend;
6886 signed_addend = addend;
6889 addend = signed_addend = rel->r_addend;
6894 /* We don't need to find a value for this symbol. It's just a marker. */
6896 *unresolved_reloc_p = FALSE;
6897 return bfd_reloc_ok;
6900 if (!globals->vxworks_p)
6901 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6905 case R_ARM_ABS32_NOI:
6907 case R_ARM_REL32_NOI:
6913 /* Handle relocations which should use the PLT entry. ABS32/REL32
6914 will use the symbol's value, which may point to a PLT entry, but we
6915 don't need to handle that here. If we created a PLT entry, all
6916 branches in this object should go to it, except if the PLT is too
6917 far away, in which case a long branch stub should be inserted. */
6918 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6919 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6920 && r_type != R_ARM_CALL
6921 && r_type != R_ARM_JUMP24
6922 && r_type != R_ARM_PLT32)
6925 && h->plt.offset != (bfd_vma) -1)
6927 /* If we've created a .plt section, and assigned a PLT entry to
6928 this function, it should not be known to bind locally. If
6929 it were, we would have cleared the PLT entry. */
6930 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6932 value = (splt->output_section->vma
6933 + splt->output_offset
6935 *unresolved_reloc_p = FALSE;
6936 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6937 contents, rel->r_offset, value,
6941 /* When generating a shared object or relocatable executable, these
6942 relocations are copied into the output file to be resolved at run time. */
6944 if ((info->shared || globals->root.is_relocatable_executable)
6945 && (input_section->flags & SEC_ALLOC)
6946 && !(globals->vxworks_p
6947 && strcmp (input_section->output_section->name,
6949 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6950 || !SYMBOL_CALLS_LOCAL (info, h))
6951 && (!strstr (input_section->name, STUB_SUFFIX))
6953 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6954 || h->root.type != bfd_link_hash_undefweak)
6955 && r_type != R_ARM_PC24
6956 && r_type != R_ARM_CALL
6957 && r_type != R_ARM_JUMP24
6958 && r_type != R_ARM_PREL31
6959 && r_type != R_ARM_PLT32)
6961 Elf_Internal_Rela outrel;
6963 bfd_boolean skip, relocate;
6965 *unresolved_reloc_p = FALSE;
6969 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6970 ! globals->use_rel);
6973 return bfd_reloc_notsupported;
6979 outrel.r_addend = addend;
6981 _bfd_elf_section_offset (output_bfd, info, input_section,
6983 if (outrel.r_offset == (bfd_vma) -1)
6985 else if (outrel.r_offset == (bfd_vma) -2)
6986 skip = TRUE, relocate = TRUE;
6987 outrel.r_offset += (input_section->output_section->vma
6988 + input_section->output_offset);
6991 memset (&outrel, 0, sizeof outrel);
6996 || !h->def_regular))
6997 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
7002 /* This symbol is local, or marked to become local. */
7003 if (sym_flags == STT_ARM_TFUNC)
7005 if (globals->symbian_p)
7009 /* On Symbian OS, the data segment and text segment
7010 can be relocated independently. Therefore, we
7011 must indicate the segment to which this
7012 relocation is relative. The BPABI allows us to
7013 use any symbol in the right segment; we just use
7014 the section symbol as it is convenient. (We
7015 cannot use the symbol given by "h" directly as it
7016 will not appear in the dynamic symbol table.)
7018 Note that the dynamic linker ignores the section
7019 symbol value, so we don't subtract osec->vma
7020 from the emitted reloc addend. */
7022 osec = sym_sec->output_section;
7024 osec = input_section->output_section;
7025 symbol = elf_section_data (osec)->dynindx;
7028 struct elf_link_hash_table *htab = elf_hash_table (info);
7030 if ((osec->flags & SEC_READONLY) == 0
7031 && htab->data_index_section != NULL)
7032 osec = htab->data_index_section;
7034 osec = htab->text_index_section;
7035 symbol = elf_section_data (osec)->dynindx;
7037 BFD_ASSERT (symbol != 0);
7040 /* On SVR4-ish systems, the dynamic loader cannot
7041 relocate the text and data segments independently,
7042 so the symbol does not matter. */
7044 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
7045 if (globals->use_rel)
7048 outrel.r_addend += value;
7051 loc = sreloc->contents;
7052 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
7053 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7055 /* If this reloc is against an external symbol, we do not want to
7056 fiddle with the addend. Otherwise, we need to include the symbol
7057 value so that it becomes an addend for the dynamic reloc. */
7059 return bfd_reloc_ok;
7061 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7062 contents, rel->r_offset, value,
7065 else switch (r_type)
7068 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7070 case R_ARM_XPC25: /* Arm BLX instruction. */
7073 case R_ARM_PC24: /* Arm B/BL instruction. */
7076 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7078 if (r_type == R_ARM_XPC25)
7080 /* Check for Arm calling Arm function. */
7081 /* FIXME: Should we translate the instruction into a BL
7082 instruction instead ? */
7083 if (sym_flags != STT_ARM_TFUNC)
7084 (*_bfd_error_handler)
7085 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7087 h ? h->root.root.string : "(local)");
7089 else if (r_type == R_ARM_PC24)
7091 /* Check for Arm calling Thumb function. */
7092 if (sym_flags == STT_ARM_TFUNC)
7094 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7095 output_bfd, input_section,
7096 hit_data, sym_sec, rel->r_offset,
7097 signed_addend, value,
7099 return bfd_reloc_ok;
7101 return bfd_reloc_dangerous;
7105 /* Check if a stub has to be inserted because the
7106 destination is too far or we are changing mode. */
7107 if ( r_type == R_ARM_CALL
7108 || r_type == R_ARM_JUMP24
7109 || r_type == R_ARM_PLT32)
7111 enum elf32_arm_stub_type stub_type = arm_stub_none;
7112 struct elf32_arm_link_hash_entry *hash;
7114 hash = (struct elf32_arm_link_hash_entry *) h;
7115 stub_type = arm_type_of_stub (info, input_section, rel,
7118 input_bfd, sym_name);
7120 if (stub_type != arm_stub_none)
7122 /* The target is out of reach, so redirect the
7123 branch to the local stub for this function. */
7125 stub_entry = elf32_arm_get_stub_entry (input_section,
7129 if (stub_entry != NULL)
7130 value = (stub_entry->stub_offset
7131 + stub_entry->stub_sec->output_offset
7132 + stub_entry->stub_sec->output_section->vma);
7136 /* If the call goes through a PLT entry, make sure to
7137 check distance to the right destination address. */
7140 && h->plt.offset != (bfd_vma) -1)
7142 value = (splt->output_section->vma
7143 + splt->output_offset
7145 *unresolved_reloc_p = FALSE;
7146 /* The PLT entry is in ARM mode, regardless of the target function. */
7148 sym_flags = STT_FUNC;
7153 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7155 S is the address of the symbol in the relocation.
7156 P is address of the instruction being relocated.
7157 A is the addend (extracted from the instruction) in bytes.
7159 S is held in 'value'.
7160 P is the base address of the section containing the
7161 instruction plus the offset of the reloc into that
7163 (input_section->output_section->vma +
7164 input_section->output_offset +
7166 A is the addend, converted into bytes, ie:
7169 Note: None of these operations have knowledge of the pipeline
7170 size of the processor, thus it is up to the assembler to
7171 encode this information into the addend. */
7172 value -= (input_section->output_section->vma
7173 + input_section->output_offset);
7174 value -= rel->r_offset;
7175 if (globals->use_rel)
7176 value += (signed_addend << howto->size);
7178 /* RELA addends do not have to be adjusted by howto->size. */
7179 value += signed_addend;
7181 signed_addend = value;
7182 signed_addend >>= howto->rightshift;
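/* For these branch relocations the 24-bit field holds a word offset: a REL
   addend is converted to bytes with the << howto->size above, and the
   result is converted back to words with the >> howto->rightshift before
   being re-inserted.  */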
7184 /* A branch to an undefined weak symbol is turned into a jump to
7185 the next instruction unless a PLT entry will be created.
7186 Do the same for local undefined symbols (but not for STN_UNDEF).
7187 The jump to the next instruction is optimized as a NOP depending
7188 on the architecture. */
7189 if (h ? (h->root.type == bfd_link_hash_undefweak
7190 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7191 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
7193 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7195 if (arch_has_arm_nop (globals))
7196 value |= 0x0320f000;
7198 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7202 /* Perform a signed range check. */
7203 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7204 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7205 return bfd_reloc_overflow;
7207 addend = (value & 2);
7209 value = (signed_addend & howto->dst_mask)
7210 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7212 if (r_type == R_ARM_CALL)
7214 /* Set the H bit in the BLX instruction. */
7215 if (sym_flags == STT_ARM_TFUNC)
7220 value &= ~(bfd_vma)(1 << 24);
7223 /* Select the correct instruction (BL or BLX). */
7224 /* Only if we are not handling a BL to a stub. In this
7225 case, mode switching is performed by the stub. */
7226 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7230 value &= ~(bfd_vma)(1 << 28);
7240 if (sym_flags == STT_ARM_TFUNC)
7244 case R_ARM_ABS32_NOI:
7250 if (sym_flags == STT_ARM_TFUNC)
7252 value -= (input_section->output_section->vma
7253 + input_section->output_offset + rel->r_offset);
7256 case R_ARM_REL32_NOI:
7258 value -= (input_section->output_section->vma
7259 + input_section->output_offset + rel->r_offset);
7263 value -= (input_section->output_section->vma
7264 + input_section->output_offset + rel->r_offset);
7265 value += signed_addend;
7266 if (! h || h->root.type != bfd_link_hash_undefweak)
7268 /* Check for overflow. */
7269 if ((value ^ (value >> 1)) & (1 << 30))
7270 return bfd_reloc_overflow;
7272 value &= 0x7fffffff;
7273 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7274 if (sym_flags == STT_ARM_TFUNC)
7279 bfd_put_32 (input_bfd, value, hit_data);
7280 return bfd_reloc_ok;
7285 /* There is no way to tell whether the user intended to use a signed or
7286 unsigned addend. When checking for overflow we accept either,
7287 as specified by the AAELF. */
7288 if ((long) value > 0xff || (long) value < -0x80)
7289 return bfd_reloc_overflow;
7291 bfd_put_8 (input_bfd, value, hit_data);
7292 return bfd_reloc_ok;
7297 /* See comment for R_ARM_ABS8. */
7298 if ((long) value > 0xffff || (long) value < -0x8000)
7299 return bfd_reloc_overflow;
7301 bfd_put_16 (input_bfd, value, hit_data);
7302 return bfd_reloc_ok;
7304 case R_ARM_THM_ABS5:
7305 /* Support ldr and str instructions for the thumb. */
7306 if (globals->use_rel)
7308 /* Need to refetch addend. */
7309 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7310 /* ??? Need to determine shift amount from operand size. */
7311 addend >>= howto->rightshift;
7315 /* ??? Isn't value unsigned? */
7316 if ((long) value > 0x1f || (long) value < -0x10)
7317 return bfd_reloc_overflow;
7319 /* ??? Value needs to be properly shifted into place first. */
7320 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7321 bfd_put_16 (input_bfd, value, hit_data);
7322 return bfd_reloc_ok;
7324 case R_ARM_THM_ALU_PREL_11_0:
7325 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7328 bfd_signed_vma relocation;
7330 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7331 | bfd_get_16 (input_bfd, hit_data + 2);
7333 if (globals->use_rel)
7335 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7336 | ((insn & (1 << 26)) >> 15);
7337 if (insn & 0xf00000)
7338 signed_addend = -signed_addend;
7341 relocation = value + signed_addend;
7342 relocation -= (input_section->output_section->vma
7343 + input_section->output_offset
7346 value = abs (relocation);
7348 if (value >= 0x1000)
7349 return bfd_reloc_overflow;
7351 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7352 | ((value & 0x700) << 4)
7353 | ((value & 0x800) << 15);
7357 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7358 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7360 return bfd_reloc_ok;
7364 /* PR 10073: This reloc is not generated by the GNU toolchain,
7365 but it is supported for compatibility with third party libraries
7366 generated by other compilers, specifically the ARM/IAR. */
7369 bfd_signed_vma relocation;
7371 insn = bfd_get_16 (input_bfd, hit_data);
7373 if (globals->use_rel)
7374 addend = (insn & 0x00ff) << 2;
7376 relocation = value + addend;
7377 relocation -= (input_section->output_section->vma
7378 + input_section->output_offset
7381 value = abs (relocation);
7383 /* We do not check for overflow of this reloc. Although strictly
7384 speaking this is incorrect, it appears to be necessary in order
7385 to work with IAR generated relocs. Since GCC and GAS do not
7386 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7387 a problem for them. */
7390 insn = (insn & 0xff00) | (value >> 2);
7392 bfd_put_16 (input_bfd, insn, hit_data);
7394 return bfd_reloc_ok;
7397 case R_ARM_THM_PC12:
7398 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7401 bfd_signed_vma relocation;
7403 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7404 | bfd_get_16 (input_bfd, hit_data + 2);
7406 if (globals->use_rel)
7408 signed_addend = insn & 0xfff;
7409 if (!(insn & (1 << 23)))
7410 signed_addend = -signed_addend;
7413 relocation = value + signed_addend;
7414 relocation -= (input_section->output_section->vma
7415 + input_section->output_offset
7418 value = abs (relocation);
7420 if (value >= 0x1000)
7421 return bfd_reloc_overflow;
7423 insn = (insn & 0xff7ff000) | value;
7424 if (relocation >= 0)
7427 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7428 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7430 return bfd_reloc_ok;
7433 case R_ARM_THM_XPC22:
7434 case R_ARM_THM_CALL:
7435 case R_ARM_THM_JUMP24:
7436 /* Thumb BL (branch long instruction). */
7440 bfd_boolean overflow = FALSE;
7441 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7442 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7443 bfd_signed_vma reloc_signed_max;
7444 bfd_signed_vma reloc_signed_min;
7446 bfd_signed_vma signed_check;
7448 const int thumb2 = using_thumb2 (globals);
7450 /* A branch to an undefined weak symbol is turned into a jump to
7451 the next instruction unless a PLT entry will be created.
7452 The jump to the next instruction is optimized as a NOP.W for
7453 Thumb-2 enabled architectures. */
7454 if (h && h->root.type == bfd_link_hash_undefweak
7455 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7457 if (arch_has_thumb2_nop (globals))
7459 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7460 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7464 bfd_put_16 (input_bfd, 0xe000, hit_data);
7465 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7467 return bfd_reloc_ok;
7470 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7471 with Thumb-1) involving the J1 and J2 bits. */
7472 if (globals->use_rel)
7474 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7475 bfd_vma upper = upper_insn & 0x3ff;
7476 bfd_vma lower = lower_insn & 0x7ff;
7477 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7478 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7479 bfd_vma i1 = j1 ^ s ? 0 : 1;
7480 bfd_vma i2 = j2 ^ s ? 0 : 1;
7482 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7484 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7486 signed_addend = addend;
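/* This is the Thumb-2 BL/BLX offset encoding: I1 = NOT(J1 XOR S),
   I2 = NOT(J2 XOR S), and the offset is the sign-extended value
   S:I1:I2:imm10:imm11:'0'.  The OR followed by the subtraction of
   (1 << 24) above is a two's complement sign extension with S as the
   sign bit.  */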
7489 if (r_type == R_ARM_THM_XPC22)
7491 /* Check for Thumb to Thumb call. */
7492 /* FIXME: Should we translate the instruction into a BL
7493 instruction instead ? */
7494 if (sym_flags == STT_ARM_TFUNC)
7495 (*_bfd_error_handler)
7496 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7498 h ? h->root.root.string : "(local)");
7502 /* If it is not a call to Thumb, assume call to Arm.
7503 If it is a call relative to a section name, then it is not a
7504 function call at all, but rather a long jump. Calls through
7505 the PLT do not require stubs. */
7506 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7507 && (h == NULL || splt == NULL
7508 || h->plt.offset == (bfd_vma) -1))
7510 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7512 /* Convert BL to BLX. */
7513 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7515 else if (( r_type != R_ARM_THM_CALL)
7516 && (r_type != R_ARM_THM_JUMP24))
7518 if (elf32_thumb_to_arm_stub
7519 (info, sym_name, input_bfd, output_bfd, input_section,
7520 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7522 return bfd_reloc_ok;
7524 return bfd_reloc_dangerous;
7527 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7528 && r_type == R_ARM_THM_CALL)
7530 /* Make sure this is a BL. */
7531 lower_insn |= 0x1800;
7535 enum elf32_arm_stub_type stub_type = arm_stub_none;
7536 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7538 /* Check if a stub has to be inserted because the destination
is too far away or we are changing mode. */
7540 struct elf32_arm_stub_hash_entry *stub_entry;
7541 struct elf32_arm_link_hash_entry *hash;
7543 hash = (struct elf32_arm_link_hash_entry *) h;
7545 stub_type = arm_type_of_stub (info, input_section, rel,
7546 &sym_flags, hash, value, sym_sec,
7547 input_bfd, sym_name);
7549 if (stub_type != arm_stub_none)
7551 /* The target is out of reach or we are changing modes, so
7552 redirect the branch to the local stub for this function. */
7554 stub_entry = elf32_arm_get_stub_entry (input_section,
7558 if (stub_entry != NULL)
7559 value = (stub_entry->stub_offset
7560 + stub_entry->stub_sec->output_offset
7561 + stub_entry->stub_sec->output_section->vma);
7563 /* If this call becomes a call to Arm, force BLX. */
7564 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7567 && !arm_stub_is_thumb (stub_entry->stub_type))
7568 || (sym_flags != STT_ARM_TFUNC))
7569 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7574 /* Handle calls via the PLT. */
7575 if (stub_type == arm_stub_none
7578 && h->plt.offset != (bfd_vma) -1)
7580 value = (splt->output_section->vma
7581 + splt->output_offset
7584 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7586 /* If the Thumb BLX instruction is available, convert
7587 the BL to a BLX instruction to call the ARM-mode PLT entry. */
7589 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7590 sym_flags = STT_FUNC;
7594 /* Target the Thumb stub before the ARM PLT entry. */
7595 value -= PLT_THUMB_STUB_SIZE;
7596 sym_flags = STT_ARM_TFUNC;
7598 *unresolved_reloc_p = FALSE;
7601 relocation = value + signed_addend;
7603 relocation -= (input_section->output_section->vma
7604 + input_section->output_offset
7607 check = relocation >> howto->rightshift;
7609 /* If this is a signed value, the rightshift just dropped
7610 leading 1 bits (assuming two's complement). */
7611 if ((bfd_signed_vma) relocation >= 0)
7612 signed_check = check;
7614 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7616 /* Calculate the permissible maximum and minimum values for
7617 this relocation according to whether we're relocating for Thumb-2 or not. */
7619 bitsize = howto->bitsize;
7622 reloc_signed_max = (1 << (bitsize - 1)) - 1;
7623 reloc_signed_min = ~reloc_signed_max;
7625 /* Assumes two's complement. */
7626 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7629 if ((lower_insn & 0x5000) == 0x4000)
7630 /* For a BLX instruction, make sure that the relocation is rounded up
7631 to a word boundary. This follows the semantics of the instruction
7632 which specifies that bit 1 of the target address will come from bit
7633 1 of the base address. */
7634 relocation = (relocation + 2) & ~ 3;
7636 /* Put RELOCATION back into the insn. Assumes two's complement.
7637 We use the Thumb-2 encoding, which is safe even if dealing with
7638 a Thumb-1 instruction by virtue of our overflow check above. */
7639 reloc_sign = (signed_check < 0) ? 1 : 0;
7640 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7641 | ((relocation >> 12) & 0x3ff)
7642 | (reloc_sign << 10);
7643 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7644 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7645 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7646 | ((relocation >> 1) & 0x7ff);
7648 /* Put the relocated value back in the object file: */
7649 bfd_put_16 (input_bfd, upper_insn, hit_data);
7650 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7652 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7656 case R_ARM_THM_JUMP19:
7657 /* Thumb32 conditional branch instruction. */
7660 bfd_boolean overflow = FALSE;
7661 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7662 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7663 bfd_signed_vma reloc_signed_max = 0xffffe;
7664 bfd_signed_vma reloc_signed_min = -0x100000;
7665 bfd_signed_vma signed_check;
7667 /* Need to refetch the addend, reconstruct the top three bits,
7668 and squish the two 11 bit pieces together. */
7669 if (globals->use_rel)
7671 bfd_vma S = (upper_insn & 0x0400) >> 10;
7672 bfd_vma upper = (upper_insn & 0x003f);
7673 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7674 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7675 bfd_vma lower = (lower_insn & 0x07ff);
7680 upper -= 0x0100; /* Sign extend. */
7682 addend = (upper << 12) | (lower << 1);
7683 signed_addend = addend;
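/* The Thumb-2 conditional branch (B<cond>.W) encodes its offset as
   S:J2:J1:imm6:imm11:'0', without the J1/J2 inversion used by BL, which
   is why the bits can be reassembled directly here and below.  */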
7686 /* Handle calls via the PLT. */
7687 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7689 value = (splt->output_section->vma
7690 + splt->output_offset
7692 /* Target the Thumb stub before the ARM PLT entry. */
7693 value -= PLT_THUMB_STUB_SIZE;
7694 *unresolved_reloc_p = FALSE;
7697 /* ??? Should handle interworking? GCC might someday try to
7698 use this for tail calls. */
7700 relocation = value + signed_addend;
7701 relocation -= (input_section->output_section->vma
7702 + input_section->output_offset
7704 signed_check = (bfd_signed_vma) relocation;
7706 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7709 /* Put RELOCATION back into the insn. */
7711 bfd_vma S = (relocation & 0x00100000) >> 20;
7712 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7713 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7714 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7715 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7717 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7718 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7721 /* Put the relocated value back in the object file: */
7722 bfd_put_16 (input_bfd, upper_insn, hit_data);
7723 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7725 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7728 case R_ARM_THM_JUMP11:
7729 case R_ARM_THM_JUMP8:
7730 case R_ARM_THM_JUMP6:
7731 /* Thumb B (branch) instruction. */
7733 bfd_signed_vma relocation;
7734 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7735 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7736 bfd_signed_vma signed_check;
7738 /* CZB cannot jump backward. */
7739 if (r_type == R_ARM_THM_JUMP6)
7740 reloc_signed_min = 0;
7742 if (globals->use_rel)
7744 /* Need to refetch addend. */
7745 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7746 if (addend & ((howto->src_mask + 1) >> 1))
7749 signed_addend &= ~ howto->src_mask;
7750 signed_addend |= addend;
7753 signed_addend = addend;
7754 /* The value in the insn has been right shifted. We need to
7755 undo this, so that we can perform the address calculation
7756 in terms of bytes. */
7757 signed_addend <<= howto->rightshift;
7759 relocation = value + signed_addend;
7761 relocation -= (input_section->output_section->vma
7762 + input_section->output_offset
7765 relocation >>= howto->rightshift;
7766 signed_check = relocation;
7768 if (r_type == R_ARM_THM_JUMP6)
7769 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7771 relocation &= howto->dst_mask;
7772 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7774 bfd_put_16 (input_bfd, relocation, hit_data);
7776 /* Assumes two's complement. */
7777 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7778 return bfd_reloc_overflow;
7780 return bfd_reloc_ok;
7783 case R_ARM_ALU_PCREL7_0:
7784 case R_ARM_ALU_PCREL15_8:
7785 case R_ARM_ALU_PCREL23_15:
7790 insn = bfd_get_32 (input_bfd, hit_data);
7791 if (globals->use_rel)
7793 /* Extract the addend. */
7794 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7795 signed_addend = addend;
7797 relocation = value + signed_addend;
7799 relocation -= (input_section->output_section->vma
7800 + input_section->output_offset
7802 insn = (insn & ~0xfff)
7803 | ((howto->bitpos << 7) & 0xf00)
7804 | ((relocation >> howto->bitpos) & 0xff);
7805 bfd_put_32 (input_bfd, value, hit_data);
7807 return bfd_reloc_ok;
7809 case R_ARM_GNU_VTINHERIT:
7810 case R_ARM_GNU_VTENTRY:
7811 return bfd_reloc_ok;
7813 case R_ARM_GOTOFF32:
7814 /* Relocation is relative to the start of the
7815 global offset table. */
7817 BFD_ASSERT (sgot != NULL);
7819 return bfd_reloc_notsupported;
7821 /* If we are addressing a Thumb function, we need to adjust the
7822 address by one, so that attempts to call the function pointer will
7823 correctly interpret it as Thumb code. */
7824 if (sym_flags == STT_ARM_TFUNC)
7827 /* Note that sgot->output_offset is not involved in this
7828 calculation. We always want the start of .got. If we
7829 define _GLOBAL_OFFSET_TABLE in a different way, as is
7830 permitted by the ABI, we might have to change this calculation. */
7832 value -= sgot->output_section->vma;
7833 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7834 contents, rel->r_offset, value,
7838 /* Use global offset table as symbol value. */
7839 BFD_ASSERT (sgot != NULL);
7842 return bfd_reloc_notsupported;
7844 *unresolved_reloc_p = FALSE;
7845 value = sgot->output_section->vma;
7846 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7847 contents, rel->r_offset, value,
7851 case R_ARM_GOT_PREL:
7852 /* Relocation is to the entry for this symbol in the
7853 global offset table. */
7855 return bfd_reloc_notsupported;
7862 off = h->got.offset;
7863 BFD_ASSERT (off != (bfd_vma) -1);
7864 dyn = globals->root.dynamic_sections_created;
7866 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7868 && SYMBOL_REFERENCES_LOCAL (info, h))
7869 || (ELF_ST_VISIBILITY (h->other)
7870 && h->root.type == bfd_link_hash_undefweak))
7872 /* This is actually a static link, or it is a -Bsymbolic link
7873 and the symbol is defined locally. We must initialize this
7874 entry in the global offset table. Since the offset must
7875 always be a multiple of 4, we use the least significant bit
7876 to record whether we have initialized it already.
7878 When doing a dynamic link, we create a .rel(a).got relocation
7879 entry to initialize the value. This is done in the
7880 finish_dynamic_symbol routine. */
7885 /* If we are addressing a Thumb function, we need to
7886 adjust the address by one, so that attempts to
7887 call the function pointer will correctly
7888 interpret it as Thumb code. */
7889 if (sym_flags == STT_ARM_TFUNC)
7892 bfd_put_32 (output_bfd, value, sgot->contents + off);
7897 *unresolved_reloc_p = FALSE;
7899 value = sgot->output_offset + off;
7905 BFD_ASSERT (local_got_offsets != NULL &&
7906 local_got_offsets[r_symndx] != (bfd_vma) -1);
7908 off = local_got_offsets[r_symndx];
7910 /* The offset must always be a multiple of 4. We use the
7911 least significant bit to record whether we have already
7912 generated the necessary reloc. */
7917 /* If we are addressing a Thumb function, we need to
7918 adjust the address by one, so that attempts to
7919 call the function pointer will correctly
7920 interpret it as Thumb code. */
7921 if (sym_flags == STT_ARM_TFUNC)
7924 if (globals->use_rel)
7925 bfd_put_32 (output_bfd, value, sgot->contents + off);
7929 Elf_Internal_Rela outrel;
7932 BFD_ASSERT (srelgot != NULL);
7934 outrel.r_addend = addend + value;
7935 outrel.r_offset = (sgot->output_section->vma
7936 + sgot->output_offset
7938 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7939 loc = srelgot->contents;
7940 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7941 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7944 local_got_offsets[r_symndx] |= 1;
7947 value = sgot->output_offset + off;
7949 if (r_type != R_ARM_GOT32)
7950 value += sgot->output_section->vma;
7952 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7953 contents, rel->r_offset, value,
7956 case R_ARM_TLS_LDO32:
7957 value = value - dtpoff_base (info);
7959 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7960 contents, rel->r_offset, value,
7963 case R_ARM_TLS_LDM32:
7970 off = globals->tls_ldm_got.offset;
7976 /* If we don't know the module number, create a relocation for it. */
7980 Elf_Internal_Rela outrel;
7983 if (srelgot == NULL)
7986 outrel.r_addend = 0;
7987 outrel.r_offset = (sgot->output_section->vma
7988 + sgot->output_offset + off);
7989 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7991 if (globals->use_rel)
7992 bfd_put_32 (output_bfd, outrel.r_addend,
7993 sgot->contents + off);
7995 loc = srelgot->contents;
7996 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7997 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8000 bfd_put_32 (output_bfd, 1, sgot->contents + off);
8002 globals->tls_ldm_got.offset |= 1;
8005 value = sgot->output_section->vma + sgot->output_offset + off
8006 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8008 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8009 contents, rel->r_offset, value,
8013 case R_ARM_TLS_GD32:
8014 case R_ARM_TLS_IE32:
8027 dyn = globals->root.dynamic_sections_created;
8028 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
8030 || !SYMBOL_REFERENCES_LOCAL (info, h)))
8032 *unresolved_reloc_p = FALSE;
8035 off = h->got.offset;
8036 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
8040 if (local_got_offsets == NULL)
8042 off = local_got_offsets[r_symndx];
8043 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
8046 if (tls_type == GOT_UNKNOWN)
8053 bfd_boolean need_relocs = FALSE;
8054 Elf_Internal_Rela outrel;
8055 bfd_byte *loc = NULL;
8058 /* The GOT entries have not been initialized yet. Do it
8059 now, and emit any relocations. If both an IE GOT and a
8060 GD GOT are necessary, we emit the GD first. */
8062 if ((info->shared || indx != 0)
8064 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8065 || h->root.type != bfd_link_hash_undefweak))
8068 if (srelgot == NULL)
8070 loc = srelgot->contents;
8071 loc += srelgot->reloc_count * RELOC_SIZE (globals);
8074 if (tls_type & GOT_TLS_GD)
8078 outrel.r_addend = 0;
8079 outrel.r_offset = (sgot->output_section->vma
8080 + sgot->output_offset
8082 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8084 if (globals->use_rel)
8085 bfd_put_32 (output_bfd, outrel.r_addend,
8086 sgot->contents + cur_off);
8088 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8089 srelgot->reloc_count++;
8090 loc += RELOC_SIZE (globals);
8093 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8094 sgot->contents + cur_off + 4);
8097 outrel.r_addend = 0;
8098 outrel.r_info = ELF32_R_INFO (indx,
8099 R_ARM_TLS_DTPOFF32);
8100 outrel.r_offset += 4;
8102 if (globals->use_rel)
8103 bfd_put_32 (output_bfd, outrel.r_addend,
8104 sgot->contents + cur_off + 4);
8107 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8108 srelgot->reloc_count++;
8109 loc += RELOC_SIZE (globals);
8114 /* If we are not emitting relocations for a
8115 general dynamic reference, then we must be in a
8116 static link or an executable link with the
8117 symbol binding locally. Mark it as belonging
8118 to module 1, the executable. */
8119 bfd_put_32 (output_bfd, 1,
8120 sgot->contents + cur_off);
8121 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8122 sgot->contents + cur_off + 4);
8128 if (tls_type & GOT_TLS_IE)
8133 outrel.r_addend = value - dtpoff_base (info);
8135 outrel.r_addend = 0;
8136 outrel.r_offset = (sgot->output_section->vma
8137 + sgot->output_offset
8139 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8141 if (globals->use_rel)
8142 bfd_put_32 (output_bfd, outrel.r_addend,
8143 sgot->contents + cur_off);
8145 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8146 srelgot->reloc_count++;
8147 loc += RELOC_SIZE (globals);
8150 bfd_put_32 (output_bfd, tpoff (info, value),
8151 sgot->contents + cur_off);
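/* To recap the GOT layout built above: a GD reference owns two GOT words
   (module ID, then the offset within that module's TLS block), while an IE
   reference owns a single word holding the tp-relative offset.  When no
   dynamic relocations are needed the words are filled in statically for
   module 1, the executable.  */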
8158 local_got_offsets[r_symndx] |= 1;
8161 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8163 value = sgot->output_section->vma + sgot->output_offset + off
8164 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8166 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8167 contents, rel->r_offset, value,
8171 case R_ARM_TLS_LE32:
8174 (*_bfd_error_handler)
8175 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8176 input_bfd, input_section,
8177 (long) rel->r_offset, howto->name);
8178 return (bfd_reloc_status_type) FALSE;
8181 value = tpoff (info, value);
8183 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8184 contents, rel->r_offset, value,
8188 if (globals->fix_v4bx)
8190 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8192 /* Ensure that we have a BX instruction. */
8193 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8195 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8197 /* Branch to veneer. */
8199 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8200 glue_addr -= input_section->output_section->vma
8201 + input_section->output_offset
8202 + rel->r_offset + 8;
8203 insn = (insn & 0xf0000000) | 0x0a000000
8204 | ((glue_addr >> 2) & 0x00ffffff);
8208 /* Preserve Rm (lowest four bits) and the condition code
8209 (highest four bits). Other bits encode MOV PC,Rm. */
8210 insn = (insn & 0xf000000f) | 0x01a0f000;
8213 bfd_put_32 (input_bfd, insn, hit_data);
8215 return bfd_reloc_ok;
8217 case R_ARM_MOVW_ABS_NC:
8218 case R_ARM_MOVT_ABS:
8219 case R_ARM_MOVW_PREL_NC:
8220 case R_ARM_MOVT_PREL:
8221 /* Until we properly support segment-base-relative addressing then
8222 we assume the segment base to be zero, as for the group relocations.
8223 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8224 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8225 case R_ARM_MOVW_BREL_NC:
8226 case R_ARM_MOVW_BREL:
8227 case R_ARM_MOVT_BREL:
8229 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8231 if (globals->use_rel)
8233 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8234 signed_addend = (addend ^ 0x8000) - 0x8000;
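/* The ARM MOVW/MOVT encoding splits the 16-bit immediate into imm4
   (instruction bits 16-19) and imm12 (bits 0-11); that is what the shifts
   above and below reassemble.  The XOR/subtract pair sign-extends the
   16-bit addend.  */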
8237 value += signed_addend;
8239 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8240 value -= (input_section->output_section->vma
8241 + input_section->output_offset + rel->r_offset);
8243 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8244 return bfd_reloc_overflow;
8246 if (sym_flags == STT_ARM_TFUNC)
8249 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8250 || r_type == R_ARM_MOVT_BREL)
8254 insn |= value & 0xfff;
8255 insn |= (value & 0xf000) << 4;
8256 bfd_put_32 (input_bfd, insn, hit_data);
8258 return bfd_reloc_ok;
8260 case R_ARM_THM_MOVW_ABS_NC:
8261 case R_ARM_THM_MOVT_ABS:
8262 case R_ARM_THM_MOVW_PREL_NC:
8263 case R_ARM_THM_MOVT_PREL:
8264 /* Until we properly support segment-base-relative addressing then
8265 we assume the segment base to be zero, as for the above relocations.
8266 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8267 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8268 as R_ARM_THM_MOVT_ABS. */
8269 case R_ARM_THM_MOVW_BREL_NC:
8270 case R_ARM_THM_MOVW_BREL:
8271 case R_ARM_THM_MOVT_BREL:
8275 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8276 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8278 if (globals->use_rel)
8280 addend = ((insn >> 4) & 0xf000)
8281 | ((insn >> 15) & 0x0800)
8282 | ((insn >> 4) & 0x0700)
8284 signed_addend = (addend ^ 0x8000) - 0x8000;
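/* The Thumb-2 MOVW/MOVT encoding scatters the 16-bit immediate as
   imm4:i:imm3:imm8 across the two halfwords, which matches the shift
   amounts used when extracting the addend here and when re-inserting the
   value below.  */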
8287 value += signed_addend;
8289 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8290 value -= (input_section->output_section->vma
8291 + input_section->output_offset + rel->r_offset);
8293 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8294 return bfd_reloc_overflow;
8296 if (sym_flags == STT_ARM_TFUNC)
8299 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8300 || r_type == R_ARM_THM_MOVT_BREL)
8304 insn |= (value & 0xf000) << 4;
8305 insn |= (value & 0x0800) << 15;
8306 insn |= (value & 0x0700) << 4;
8307 insn |= (value & 0x00ff);
8309 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8310 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8312 return bfd_reloc_ok;
8314 case R_ARM_ALU_PC_G0_NC:
8315 case R_ARM_ALU_PC_G1_NC:
8316 case R_ARM_ALU_PC_G0:
8317 case R_ARM_ALU_PC_G1:
8318 case R_ARM_ALU_PC_G2:
8319 case R_ARM_ALU_SB_G0_NC:
8320 case R_ARM_ALU_SB_G1_NC:
8321 case R_ARM_ALU_SB_G0:
8322 case R_ARM_ALU_SB_G1:
8323 case R_ARM_ALU_SB_G2:
8325 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8326 bfd_vma pc = input_section->output_section->vma
8327 + input_section->output_offset + rel->r_offset;
8328 /* sb should be the origin of the *segment* containing the symbol.
8329 It is not clear how to obtain this OS-dependent value, so we
8330 make an arbitrary choice of zero. */
8334 bfd_signed_vma signed_value;
8337 /* Determine which group of bits to select. */
8340 case R_ARM_ALU_PC_G0_NC:
8341 case R_ARM_ALU_PC_G0:
8342 case R_ARM_ALU_SB_G0_NC:
8343 case R_ARM_ALU_SB_G0:
8347 case R_ARM_ALU_PC_G1_NC:
8348 case R_ARM_ALU_PC_G1:
8349 case R_ARM_ALU_SB_G1_NC:
8350 case R_ARM_ALU_SB_G1:
8354 case R_ARM_ALU_PC_G2:
8355 case R_ARM_ALU_SB_G2:
8363 /* If REL, extract the addend from the insn. If RELA, it will
8364 have already been fetched for us. */
8365 if (globals->use_rel)
8368 bfd_vma constant = insn & 0xff;
8369 bfd_vma rotation = (insn & 0xf00) >> 8;
8372 signed_addend = constant;
8375 /* Compensate for the fact that in the instruction, the
8376 rotation is stored in multiples of 2 bits. */
8379 /* Rotate "constant" right by "rotation" bits. */
8380 signed_addend = (constant >> rotation) |
8381 (constant << (8 * sizeof (bfd_vma) - rotation));
8384 /* Determine if the instruction is an ADD or a SUB.
8385 (For REL, this determines the sign of the addend.) */
8386 negative = identify_add_or_sub (insn);
8389 (*_bfd_error_handler)
8390 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8391 input_bfd, input_section,
8392 (long) rel->r_offset, howto->name);
8393 return bfd_reloc_overflow;
8396 signed_addend *= negative;
8399 /* Compute the value (X) to go in the place. */
8400 if (r_type == R_ARM_ALU_PC_G0_NC
8401 || r_type == R_ARM_ALU_PC_G1_NC
8402 || r_type == R_ARM_ALU_PC_G0
8403 || r_type == R_ARM_ALU_PC_G1
8404 || r_type == R_ARM_ALU_PC_G2)
8406 signed_value = value - pc + signed_addend;
8408 /* Section base relative. */
8409 signed_value = value - sb + signed_addend;
8411 /* If the target symbol is a Thumb function, then set the
8412 Thumb bit in the address. */
8413 if (sym_flags == STT_ARM_TFUNC)
8416 /* Calculate the value of the relevant G_n, in encoded
8417 constant-with-rotation format. */
8418 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8421 /* Check for overflow if required. */
8422 if ((r_type == R_ARM_ALU_PC_G0
8423 || r_type == R_ARM_ALU_PC_G1
8424 || r_type == R_ARM_ALU_PC_G2
8425 || r_type == R_ARM_ALU_SB_G0
8426 || r_type == R_ARM_ALU_SB_G1
8427 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8429 (*_bfd_error_handler)
8430 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8431 input_bfd, input_section,
8432 (long) rel->r_offset, abs (signed_value), howto->name);
8433 return bfd_reloc_overflow;
8436 /* Mask out the value and the ADD/SUB part of the opcode; take care
8437 not to destroy the S bit. */
8440 /* Set the opcode according to whether the value to go in the
8441 place is negative. */
8442 if (signed_value < 0)
8447 /* Encode the offset. */
8450 bfd_put_32 (input_bfd, insn, hit_data);
8452 return bfd_reloc_ok;
8454 case R_ARM_LDR_PC_G0:
8455 case R_ARM_LDR_PC_G1:
8456 case R_ARM_LDR_PC_G2:
8457 case R_ARM_LDR_SB_G0:
8458 case R_ARM_LDR_SB_G1:
8459 case R_ARM_LDR_SB_G2:
8461 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8462 bfd_vma pc = input_section->output_section->vma
8463 + input_section->output_offset + rel->r_offset;
8464 bfd_vma sb = 0; /* See note above. */
8466 bfd_signed_vma signed_value;
8469 /* Determine which groups of bits to calculate. */
8472 case R_ARM_LDR_PC_G0:
8473 case R_ARM_LDR_SB_G0:
8477 case R_ARM_LDR_PC_G1:
8478 case R_ARM_LDR_SB_G1:
8482 case R_ARM_LDR_PC_G2:
8483 case R_ARM_LDR_SB_G2:
8491 /* If REL, extract the addend from the insn. If RELA, it will
8492 have already been fetched for us. */
8493 if (globals->use_rel)
8495 int negative = (insn & (1 << 23)) ? 1 : -1;
8496 signed_addend = negative * (insn & 0xfff);
8499 /* Compute the value (X) to go in the place. */
8500 if (r_type == R_ARM_LDR_PC_G0
8501 || r_type == R_ARM_LDR_PC_G1
8502 || r_type == R_ARM_LDR_PC_G2)
8504 signed_value = value - pc + signed_addend;
8506 /* Section base relative. */
8507 signed_value = value - sb + signed_addend;
8509 /* Calculate the value of the relevant G_{n-1} to obtain
8510 the residual at that stage. */
8511 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8513 /* Check for overflow. */
8514 if (residual >= 0x1000)
8516 (*_bfd_error_handler)
8517 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8518 input_bfd, input_section,
8519 (long) rel->r_offset, abs (signed_value), howto->name);
8520 return bfd_reloc_overflow;
8523 /* Mask out the value and U bit. */
8526 /* Set the U bit if the value to go in the place is non-negative. */
8527 if (signed_value >= 0)
8530 /* Encode the offset. */
8533 bfd_put_32 (input_bfd, insn, hit_data);
8535 return bfd_reloc_ok;
8537 case R_ARM_LDRS_PC_G0:
8538 case R_ARM_LDRS_PC_G1:
8539 case R_ARM_LDRS_PC_G2:
8540 case R_ARM_LDRS_SB_G0:
8541 case R_ARM_LDRS_SB_G1:
8542 case R_ARM_LDRS_SB_G2:
8544 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8545 bfd_vma pc = input_section->output_section->vma
8546 + input_section->output_offset + rel->r_offset;
8547 bfd_vma sb = 0; /* See note above. */
8549 bfd_signed_vma signed_value;
8552 /* Determine which groups of bits to calculate. */
8555 case R_ARM_LDRS_PC_G0:
8556 case R_ARM_LDRS_SB_G0:
8560 case R_ARM_LDRS_PC_G1:
8561 case R_ARM_LDRS_SB_G1:
8565 case R_ARM_LDRS_PC_G2:
8566 case R_ARM_LDRS_SB_G2:
8574 /* If REL, extract the addend from the insn. If RELA, it will
8575 have already been fetched for us. */
8576 if (globals->use_rel)
8578 int negative = (insn & (1 << 23)) ? 1 : -1;
8579 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8582 /* Compute the value (X) to go in the place. */
8583 if (r_type == R_ARM_LDRS_PC_G0
8584 || r_type == R_ARM_LDRS_PC_G1
8585 || r_type == R_ARM_LDRS_PC_G2)
8587 signed_value = value - pc + signed_addend;
8589 /* Section base relative. */
8590 signed_value = value - sb + signed_addend;
8592 /* Calculate the value of the relevant G_{n-1} to obtain
8593 the residual at that stage. */
8594 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8596 /* Check for overflow. */
8597 if (residual >= 0x100)
8599 (*_bfd_error_handler)
8600 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8601 input_bfd, input_section,
8602 (long) rel->r_offset, abs (signed_value), howto->name);
8603 return bfd_reloc_overflow;
8606 /* Mask out the value and U bit. */
8609 /* Set the U bit if the value to go in the place is non-negative. */
8610 if (signed_value >= 0)
8613 /* Encode the offset. */
8614 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8616 bfd_put_32 (input_bfd, insn, hit_data);
8618 return bfd_reloc_ok;
8620 case R_ARM_LDC_PC_G0:
8621 case R_ARM_LDC_PC_G1:
8622 case R_ARM_LDC_PC_G2:
8623 case R_ARM_LDC_SB_G0:
8624 case R_ARM_LDC_SB_G1:
8625 case R_ARM_LDC_SB_G2:
8627 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8628 bfd_vma pc = input_section->output_section->vma
8629 + input_section->output_offset + rel->r_offset;
8630 bfd_vma sb = 0; /* See note above. */
8632 bfd_signed_vma signed_value;
8635 /* Determine which groups of bits to calculate. */
8638 case R_ARM_LDC_PC_G0:
8639 case R_ARM_LDC_SB_G0:
8643 case R_ARM_LDC_PC_G1:
8644 case R_ARM_LDC_SB_G1:
8648 case R_ARM_LDC_PC_G2:
8649 case R_ARM_LDC_SB_G2:
8657 /* If REL, extract the addend from the insn. If RELA, it will
8658 have already been fetched for us. */
8659 if (globals->use_rel)
8661 int negative = (insn & (1 << 23)) ? 1 : -1;
8662 signed_addend = negative * ((insn & 0xff) << 2);
8665 /* Compute the value (X) to go in the place. */
8666 if (r_type == R_ARM_LDC_PC_G0
8667 || r_type == R_ARM_LDC_PC_G1
8668 || r_type == R_ARM_LDC_PC_G2)
8670 signed_value = value - pc + signed_addend;
8672 /* Section base relative. */
8673 signed_value = value - sb + signed_addend;
8675 /* Calculate the value of the relevant G_{n-1} to obtain
8676 the residual at that stage. */
8677 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8679 /* Check for overflow. (The absolute value to go in the place must be
8680 divisible by four and, after having been divided by four, must
8681 fit in eight bits.) */
8682 if ((residual & 0x3) != 0 || residual >= 0x400)
8684 (*_bfd_error_handler)
8685 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8686 input_bfd, input_section,
8687 (long) rel->r_offset, abs (signed_value), howto->name);
8688 return bfd_reloc_overflow;
8691 /* Mask out the value and U bit. */
8694 /* Set the U bit if the value to go in the place is non-negative. */
8695 if (signed_value >= 0)
8698 /* Encode the offset. */
8699 insn |= residual >> 2;
8701 bfd_put_32 (input_bfd, insn, hit_data);
8703 return bfd_reloc_ok;
8706 return bfd_reloc_notsupported;
8710 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8712 arm_add_to_rel (bfd * abfd,
8714 reloc_howto_type * howto,
8715 bfd_signed_vma increment)
8717 bfd_signed_vma addend;
8719 if (howto->type == R_ARM_THM_CALL
8720 || howto->type == R_ARM_THM_JUMP24)
8722 int upper_insn, lower_insn;
8725 upper_insn = bfd_get_16 (abfd, address);
8726 lower_insn = bfd_get_16 (abfd, address + 2);
8727 upper = upper_insn & 0x7ff;
8728 lower = lower_insn & 0x7ff;
8730 addend = (upper << 12) | (lower << 1);
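/* Note that this uses the original Thumb BL offset layout, a simple
   imm11:imm11:'0' pair, and does not handle the Thumb-2 S/J1/J2 bits that
   are dealt with elsewhere in this file.  */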
8731 addend += increment;
8734 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8735 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8737 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8738 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8744 contents = bfd_get_32 (abfd, address);
8746 /* Get the (signed) value from the instruction. */
8747 addend = contents & howto->src_mask;
8748 if (addend & ((howto->src_mask + 1) >> 1))
8750 bfd_signed_vma mask;
8753 mask &= ~ howto->src_mask;
8757 /* Add in the increment (which is a byte value). */
8758 switch (howto->type)
8761 addend += increment;
8768 addend <<= howto->size;
8769 addend += increment;
8771 /* Should we check for overflow here ? */
8773 /* Drop any undesired bits. */
8774 addend >>= howto->rightshift;
8778 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8780 bfd_put_32 (abfd, contents, address);
8784 #define IS_ARM_TLS_RELOC(R_TYPE) \
8785 ((R_TYPE) == R_ARM_TLS_GD32 \
8786 || (R_TYPE) == R_ARM_TLS_LDO32 \
8787 || (R_TYPE) == R_ARM_TLS_LDM32 \
8788 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8789 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8790 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8791 || (R_TYPE) == R_ARM_TLS_LE32 \
8792 || (R_TYPE) == R_ARM_TLS_IE32)
8794 /* Relocate an ARM ELF section. */
8797 elf32_arm_relocate_section (bfd * output_bfd,
8798 struct bfd_link_info * info,
8800 asection * input_section,
8801 bfd_byte * contents,
8802 Elf_Internal_Rela * relocs,
8803 Elf_Internal_Sym * local_syms,
8804 asection ** local_sections)
8806 Elf_Internal_Shdr *symtab_hdr;
8807 struct elf_link_hash_entry **sym_hashes;
8808 Elf_Internal_Rela *rel;
8809 Elf_Internal_Rela *relend;
8811 struct elf32_arm_link_hash_table * globals;
8813 globals = elf32_arm_hash_table (info);
8814 if (globals == NULL)
8817 symtab_hdr = & elf_symtab_hdr (input_bfd);
8818 sym_hashes = elf_sym_hashes (input_bfd);
8821 relend = relocs + input_section->reloc_count;
8822 for (; rel < relend; rel++)
8825 reloc_howto_type * howto;
8826 unsigned long r_symndx;
8827 Elf_Internal_Sym * sym;
8829 struct elf_link_hash_entry * h;
8831 bfd_reloc_status_type r;
8834 bfd_boolean unresolved_reloc = FALSE;
8835 char *error_message = NULL;
8837 r_symndx = ELF32_R_SYM (rel->r_info);
8838 r_type = ELF32_R_TYPE (rel->r_info);
8839 r_type = arm_real_reloc_type (globals, r_type);
8841 if ( r_type == R_ARM_GNU_VTENTRY
8842 || r_type == R_ARM_GNU_VTINHERIT)
8845 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8846 howto = bfd_reloc.howto;
8852 if (r_symndx < symtab_hdr->sh_info)
8854 sym = local_syms + r_symndx;
8855 sym_type = ELF32_ST_TYPE (sym->st_info);
8856 sec = local_sections[r_symndx];
8858 /* An object file might have a reference to a local
8859 undefined symbol. This is a daft object file, but we
8860 should at least do something about it. V4BX & NONE
8861 relocations do not use the symbol and are explicitly
8862 allowed to use the undefined symbol, so allow those.
8863 Likewise for relocations against STN_UNDEF. */
8864 if (r_type != R_ARM_V4BX
8865 && r_type != R_ARM_NONE
8866 && r_symndx != STN_UNDEF
8867 && bfd_is_und_section (sec)
8868 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8870 if (!info->callbacks->undefined_symbol
8871 (info, bfd_elf_string_from_elf_section
8872 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8873 input_bfd, input_section,
8874 rel->r_offset, TRUE))
8878 if (globals->use_rel)
8880 relocation = (sec->output_section->vma
8881 + sec->output_offset
8883 if (!info->relocatable
8884 && (sec->flags & SEC_MERGE)
8885 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8888 bfd_vma addend, value;
8892 case R_ARM_MOVW_ABS_NC:
8893 case R_ARM_MOVT_ABS:
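/* The 16-bit immediate is stored as imm4 (bits 19-16) and imm12
   (bits 11-0); the masks below glue the two fields back together and
   the XOR/subtract idiom sign-extends the result (e.g. 0xfffc
   becomes -4).  */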
8894 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8895 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8896 addend = (addend ^ 0x8000) - 0x8000;
8899 case R_ARM_THM_MOVW_ABS_NC:
8900 case R_ARM_THM_MOVT_ABS:
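/* In the Thumb encoding the immediate is spread over imm4 (first
   halfword bits 3-0), i (first halfword bit 10), imm3 (second halfword
   bits 14-12) and imm8 (second halfword bits 7-0); the masks below
   collect those fields back into a 16-bit value before it is
   sign-extended.  */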
8901 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8903 value |= bfd_get_16 (input_bfd,
8904 contents + rel->r_offset + 2);
8905 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8906 | ((value & 0x04000000) >> 15);
8907 addend = (addend ^ 0x8000) - 0x8000;
8911 if (howto->rightshift
8912 || (howto->src_mask & (howto->src_mask + 1)))
8914 (*_bfd_error_handler)
8915 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8916 input_bfd, input_section,
8917 (long) rel->r_offset, howto->name);
8921 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8923 /* Get the (signed) value from the instruction. */
8924 addend = value & howto->src_mask;
8925 if (addend & ((howto->src_mask + 1) >> 1))
8927 bfd_signed_vma mask;
8930 mask &= ~ howto->src_mask;
8938 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8940 addend += msec->output_section->vma + msec->output_offset;
8942 /* Cases here must match those in the preceding
8943 switch statement. */
8946 case R_ARM_MOVW_ABS_NC:
8947 case R_ARM_MOVT_ABS:
8948 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8950 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8953 case R_ARM_THM_MOVW_ABS_NC:
8954 case R_ARM_THM_MOVT_ABS:
8955 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8956 | (addend & 0xff) | ((addend & 0x0800) << 15);
8957 bfd_put_16 (input_bfd, value >> 16,
8958 contents + rel->r_offset);
8959 bfd_put_16 (input_bfd, value,
8960 contents + rel->r_offset + 2);
8964 value = (value & ~ howto->dst_mask)
8965 | (addend & howto->dst_mask);
8966 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8972 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8978 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8979 r_symndx, symtab_hdr, sym_hashes,
8981 unresolved_reloc, warned);
8986 if (sec != NULL && elf_discarded_section (sec))
8987 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
8988 rel, relend, howto, contents);
8990 if (info->relocatable)
8992 /* This is a relocatable link. We don't have to change
8993 anything, unless the reloc is against a section symbol,
8994 in which case we have to adjust according to where the
8995 section symbol winds up in the output section. */
8996 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8998 if (globals->use_rel)
8999 arm_add_to_rel (input_bfd, contents + rel->r_offset,
9000 howto, (bfd_signed_vma) sec->output_offset);
9002 rel->r_addend += sec->output_offset;
9008 name = h->root.root.string;
9011 name = (bfd_elf_string_from_elf_section
9012 (input_bfd, symtab_hdr->sh_link, sym->st_name));
9013 if (name == NULL || *name == '\0')
9014 name = bfd_section_name (input_bfd, sec);
9017 if (r_symndx != STN_UNDEF
9018 && r_type != R_ARM_NONE
9020 || h->root.type == bfd_link_hash_defined
9021 || h->root.type == bfd_link_hash_defweak)
9022 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
9024 (*_bfd_error_handler)
9025 ((sym_type == STT_TLS
9026 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
9027 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
9030 (long) rel->r_offset,
9035 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
9036 input_section, contents, rel,
9037 relocation, info, sec, name,
9038 (h ? ELF_ST_TYPE (h->type) :
9039 ELF_ST_TYPE (sym->st_info)), h,
9040 &unresolved_reloc, &error_message);
9042 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
9043 because such sections are not SEC_ALLOC and thus ld.so will
9044 not process them. */
9045 if (unresolved_reloc
9046 && !((input_section->flags & SEC_DEBUGGING) != 0
9049 (*_bfd_error_handler)
9050 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
9053 (long) rel->r_offset,
9055 h->root.root.string);
9059 if (r != bfd_reloc_ok)
9063 case bfd_reloc_overflow:
9064 /* If the overflowing reloc was to an undefined symbol,
9065 we have already printed one error message and there
9066 is no point complaining again. */
9068 h->root.type != bfd_link_hash_undefined)
9069 && (!((*info->callbacks->reloc_overflow)
9070 (info, (h ? &h->root : NULL), name, howto->name,
9071 (bfd_vma) 0, input_bfd, input_section,
9076 case bfd_reloc_undefined:
9077 if (!((*info->callbacks->undefined_symbol)
9078 (info, name, input_bfd, input_section,
9079 rel->r_offset, TRUE)))
9083 case bfd_reloc_outofrange:
9084 error_message = _("out of range");
9087 case bfd_reloc_notsupported:
9088 error_message = _("unsupported relocation");
9091 case bfd_reloc_dangerous:
9092 /* error_message should already be set. */
9096 error_message = _("unknown error");
9100 BFD_ASSERT (error_message != NULL);
9101 if (!((*info->callbacks->reloc_dangerous)
9102 (info, error_message, input_bfd, input_section,
9113 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
9114 adds the edit to the start of the list. (The list must be built in order of
9115 ascending TINDEX: the function's callers are primarily responsible for
9116 maintaining that condition). */
9119 add_unwind_table_edit (arm_unwind_table_edit **head,
9120 arm_unwind_table_edit **tail,
9121 arm_unwind_edit_type type,
9122 asection *linked_section,
9123 unsigned int tindex)
9125 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9126 xmalloc (sizeof (arm_unwind_table_edit));
9128 new_edit->type = type;
9129 new_edit->linked_section = linked_section;
9130 new_edit->index = tindex;
9134 new_edit->next = NULL;
9137 (*tail)->next = new_edit;
9146 new_edit->next = *head;
9155 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9157 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
9159 adjust_exidx_size (asection *exidx_sec, int adjust)
9163 if (!exidx_sec->rawsize)
9164 exidx_sec->rawsize = exidx_sec->size;
9166 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9167 out_sec = exidx_sec->output_section;
9168 /* Adjust size of output section. */
9169 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9172 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9174 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
9176 struct _arm_elf_section_data *exidx_arm_data;
9178 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9179 add_unwind_table_edit (
9180 &exidx_arm_data->u.exidx.unwind_edit_list,
9181 &exidx_arm_data->u.exidx.unwind_edit_tail,
9182 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
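/* Each .ARM.exidx entry is a pair of 32-bit words (a prel31 reference
   to the function and its unwind data), so the terminating
   EXIDX_CANTUNWIND entry grows the section by eight bytes.  */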
9184 adjust_exidx_size (exidx_sec, 8);
9187 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9188 made to those tables, such that:
9190 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9191 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9192 codes which have been inlined into the index).
9194 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
9196 The edits are applied when the tables are written
9197 (in elf32_arm_write_section).
9201 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9202 unsigned int num_text_sections,
9203 struct bfd_link_info *info,
9204 bfd_boolean merge_exidx_entries)
9207 unsigned int last_second_word = 0, i;
9208 asection *last_exidx_sec = NULL;
9209 asection *last_text_sec = NULL;
9210 int last_unwind_type = -1;
9212 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9214 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9218 for (sec = inp->sections; sec != NULL; sec = sec->next)
9220 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9221 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9223 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9226 if (elf_sec->linked_to)
9228 Elf_Internal_Shdr *linked_hdr
9229 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9230 struct _arm_elf_section_data *linked_sec_arm_data
9231 = get_arm_elf_section_data (linked_hdr->bfd_section);
9233 if (linked_sec_arm_data == NULL)
9236 /* Link this .ARM.exidx section back from the text section it
9238 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9243 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
9244 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9245 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
9247 for (i = 0; i < num_text_sections; i++)
9249 asection *sec = text_section_order[i];
9250 asection *exidx_sec;
9251 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9252 struct _arm_elf_section_data *exidx_arm_data;
9253 bfd_byte *contents = NULL;
9254 int deleted_exidx_bytes = 0;
9256 arm_unwind_table_edit *unwind_edit_head = NULL;
9257 arm_unwind_table_edit *unwind_edit_tail = NULL;
9258 Elf_Internal_Shdr *hdr;
9261 if (arm_data == NULL)
9264 exidx_sec = arm_data->u.text.arm_exidx_sec;
9265 if (exidx_sec == NULL)
9267 /* Section has no unwind data. */
9268 if (last_unwind_type == 0 || !last_exidx_sec)
9271 /* Ignore zero sized sections. */
9275 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9276 last_unwind_type = 0;
9280 /* Skip /DISCARD/ sections. */
9281 if (bfd_is_abs_section (exidx_sec->output_section))
9284 hdr = &elf_section_data (exidx_sec)->this_hdr;
9285 if (hdr->sh_type != SHT_ARM_EXIDX)
9288 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9289 if (exidx_arm_data == NULL)
9292 ibfd = exidx_sec->owner;
9294 if (hdr->contents != NULL)
9295 contents = hdr->contents;
9296 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9300 for (j = 0; j < hdr->sh_size; j += 8)
9302 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
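/* The second word of each 8-byte entry describes the unwind data: the
   value 1 is EXIDX_CANTUNWIND, a value with bit 31 set holds inlined
   unwind opcodes, and anything else is a prel31 pointer into
   .ARM.extab.  */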
9306 /* An EXIDX_CANTUNWIND entry. */
9307 if (second_word == 1)
9309 if (last_unwind_type == 0)
9313 /* Inlined unwinding data. Merge if equal to previous. */
9314 else if ((second_word & 0x80000000) != 0)
9316 if (merge_exidx_entries
9317 && last_second_word == second_word && last_unwind_type == 1)
9320 last_second_word = second_word;
9322 /* Normal table entry. In theory we could merge these too,
9323 but duplicate entries are likely to be much less common. */
9329 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9330 DELETE_EXIDX_ENTRY, NULL, j / 8);
9332 deleted_exidx_bytes += 8;
9335 last_unwind_type = unwind_type;
9338 /* Free contents if we allocated it ourselves. */
9339 if (contents != hdr->contents)
9342 /* Record edits to be applied later (in elf32_arm_write_section). */
9343 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9344 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9346 if (deleted_exidx_bytes > 0)
9347 adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
9349 last_exidx_sec = exidx_sec;
9350 last_text_sec = sec;
9353 /* Add terminating CANTUNWIND entry. */
9354 if (last_exidx_sec && last_unwind_type != 0)
9355 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9361 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9362 bfd *ibfd, const char *name)
9364 asection *sec, *osec;
9366 sec = bfd_get_section_by_name (ibfd, name);
9367 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9370 osec = sec->output_section;
9371 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9374 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9375 sec->output_offset, sec->size))
9382 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9384 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9385 asection *sec, *osec;
9387 if (globals == NULL)
9390 /* Invoke the regular ELF backend linker to do all the work. */
9391 if (!bfd_elf_final_link (abfd, info))
9394 /* Process stub sections (e.g. BE8 encoding, ...). */
9395 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
9397 for (i = 0; i < htab->top_id; i++)
9399 sec = htab->stub_group[i].stub_sec;
9400 /* Only process it once, in its link_sec slot. */
9401 if (sec && i == htab->stub_group[i].link_sec->id)
9403 osec = sec->output_section;
9404 elf32_arm_write_section (abfd, info, sec, sec->contents);
9405 if (! bfd_set_section_contents (abfd, osec, sec->contents,
9406 sec->output_offset, sec->size))
9411 /* Write out any glue sections now that we have created all the
9413 if (globals->bfd_of_glue_owner != NULL)
9415 if (! elf32_arm_output_glue_section (info, abfd,
9416 globals->bfd_of_glue_owner,
9417 ARM2THUMB_GLUE_SECTION_NAME))
9420 if (! elf32_arm_output_glue_section (info, abfd,
9421 globals->bfd_of_glue_owner,
9422 THUMB2ARM_GLUE_SECTION_NAME))
9425 if (! elf32_arm_output_glue_section (info, abfd,
9426 globals->bfd_of_glue_owner,
9427 VFP11_ERRATUM_VENEER_SECTION_NAME))
9430 if (! elf32_arm_output_glue_section (info, abfd,
9431 globals->bfd_of_glue_owner,
9432 ARM_BX_GLUE_SECTION_NAME))
9439 /* Set the right machine number. */
9442 elf32_arm_object_p (bfd *abfd)
9446 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9448 if (mach != bfd_mach_arm_unknown)
9449 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9451 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9452 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9455 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9460 /* Function to keep ARM specific flags in the ELF header. */
9463 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9465 if (elf_flags_init (abfd)
9466 && elf_elfheader (abfd)->e_flags != flags)
9468 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9470 if (flags & EF_ARM_INTERWORK)
9471 (*_bfd_error_handler)
9472 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9476 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9482 elf_elfheader (abfd)->e_flags = flags;
9483 elf_flags_init (abfd) = TRUE;
9489 /* Copy backend specific data from one object module to another. */
9492 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9497 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9500 in_flags = elf_elfheader (ibfd)->e_flags;
9501 out_flags = elf_elfheader (obfd)->e_flags;
9503 if (elf_flags_init (obfd)
9504 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9505 && in_flags != out_flags)
9507 /* Cannot mix APCS26 and APCS32 code. */
9508 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9511 /* Cannot mix float APCS and non-float APCS code. */
9512 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9515 /* If the src and dest have different interworking flags
9516 then turn off the interworking bit. */
9517 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9519 if (out_flags & EF_ARM_INTERWORK)
9521 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9524 in_flags &= ~EF_ARM_INTERWORK;
9527 /* Likewise for PIC, though don't warn for this case. */
9528 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9529 in_flags &= ~EF_ARM_PIC;
9532 elf_elfheader (obfd)->e_flags = in_flags;
9533 elf_flags_init (obfd) = TRUE;
9535 /* Also copy the EI_OSABI field. */
9536 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9537 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9539 /* Copy object attributes. */
9540 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9545 /* Values for Tag_ABI_PCS_R9_use. */
9554 /* Values for Tag_ABI_PCS_RW_data. */
9557 AEABI_PCS_RW_data_absolute,
9558 AEABI_PCS_RW_data_PCrel,
9559 AEABI_PCS_RW_data_SBrel,
9560 AEABI_PCS_RW_data_unused
9563 /* Values for Tag_ABI_enum_size. */
9569 AEABI_enum_forced_wide
9572 /* Determine whether an object attribute tag takes an integer, a
9576 elf32_arm_obj_attrs_arg_type (int tag)
9578 if (tag == Tag_compatibility)
9579 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9580 else if (tag == Tag_nodefaults)
9581 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9582 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9583 return ATTR_TYPE_FLAG_STR_VAL;
9585 return ATTR_TYPE_FLAG_INT_VAL;
9587 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9590 /* The ABI defines that Tag_conformance should be emitted first, and that
9591 Tag_nodefaults should be second (if either is defined). This sets those
9592 two positions, and bumps up the position of all the remaining tags to
9595 elf32_arm_obj_attrs_order (int num)
9597 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
9598 return Tag_conformance;
9599 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
9600 return Tag_nodefaults;
9601 if ((num - 2) < Tag_nodefaults)
9603 if ((num - 1) < Tag_conformance)
9608 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9610 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
9612 if ((tag & 127) < 64)
9615 (_("%B: Unknown mandatory EABI object attribute %d"),
9617 bfd_set_error (bfd_error_bad_value);
9623 (_("Warning: %B: Unknown EABI object attribute %d"),
9629 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9630 Returns -1 if no architecture could be read. */
9633 get_secondary_compatible_arch (bfd *abfd)
9635 obj_attribute *attr =
9636 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9638 /* Note: the tag and its argument below are uleb128 values, though
9639 currently-defined values fit in one byte for each. */
9641 && attr->s[0] == Tag_CPU_arch
9642 && (attr->s[1] & 128) != 128
9646 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9650 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9651 The tag is removed if ARCH is -1. */
9654 set_secondary_compatible_arch (bfd *abfd, int arch)
9656 obj_attribute *attr =
9657 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9665 /* Note: the tag and its argument below are uleb128 values, though
9666 currently-defined values fit in one byte for each. */
9668 attr->s = (char *) bfd_alloc (abfd, 3);
9669 attr->s[0] = Tag_CPU_arch;
9674 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9678 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9679 int newtag, int secondary_compat)
9681 #define T(X) TAG_CPU_ARCH_##X
9682 int tagl, tagh, result;
9685 T(V6T2), /* PRE_V4. */
9689 T(V6T2), /* V5TE. */
9690 T(V6T2), /* V5TEJ. */
9697 T(V6K), /* PRE_V4. */
9702 T(V6K), /* V5TEJ. */
9704 T(V6KZ), /* V6KZ. */
9710 T(V7), /* PRE_V4. */
9729 T(V6K), /* V5TEJ. */
9731 T(V6KZ), /* V6KZ. */
9744 T(V6K), /* V5TEJ. */
9746 T(V6KZ), /* V6KZ. */
9750 T(V6S_M), /* V6_M. */
9751 T(V6S_M) /* V6S_M. */
9757 T(V7E_M), /* V4T. */
9758 T(V7E_M), /* V5T. */
9759 T(V7E_M), /* V5TE. */
9760 T(V7E_M), /* V5TEJ. */
9762 T(V7E_M), /* V6KZ. */
9763 T(V7E_M), /* V6T2. */
9764 T(V7E_M), /* V6K. */
9766 T(V7E_M), /* V6_M. */
9767 T(V7E_M), /* V6S_M. */
9768 T(V7E_M) /* V7E_M. */
9770 const int v4t_plus_v6_m[] =
9776 T(V5TE), /* V5TE. */
9777 T(V5TEJ), /* V5TEJ. */
9779 T(V6KZ), /* V6KZ. */
9780 T(V6T2), /* V6T2. */
9783 T(V6_M), /* V6_M. */
9784 T(V6S_M), /* V6S_M. */
9785 T(V7E_M), /* V7E_M. */
9786 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9796 /* Pseudo-architecture. */
9800 /* Check we've not got a higher architecture than we know about. */
9802 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
9804 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9808 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9810 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9811 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9812 oldtag = T(V4T_PLUS_V6_M);
9814 /* And override the new tag if we have a Tag_also_compatible_with on the
9817 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9818 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9819 newtag = T(V4T_PLUS_V6_M);
9821 tagl = (oldtag < newtag) ? oldtag : newtag;
9822 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9824 /* Architectures before V6KZ add features monotonically. */
9825 if (tagh <= TAG_CPU_ARCH_V6KZ)
9828 result = comb[tagh - T(V6T2)][tagl];
9830 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9831 as the canonical version. */
9832 if (result == T(V4T_PLUS_V6_M))
9835 *secondary_compat_out = T(V6_M);
9838 *secondary_compat_out = -1;
9842 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9843 ibfd, oldtag, newtag);
9851 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9852 are conflicting attributes. */
9855 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9857 obj_attribute *in_attr;
9858 obj_attribute *out_attr;
9859 /* Some tags have 0 = don't care, 1 = strong requirement,
9860 2 = weak requirement. */
9861 static const int order_021[3] = {0, 2, 1};
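/* Indexed by attribute value: a larger entry means a stronger
   requirement, so 1 (strong) outranks 2 (weak), which outranks
   0 (don't care) when attributes are merged below.  */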
9863 bfd_boolean result = TRUE;
9865 /* Skip the linker stubs file. This preserves previous behavior
9866 of accepting unknown attributes in the first input file - but
9868 if (ibfd->flags & BFD_LINKER_CREATED)
9871 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9873 /* This is the first object. Copy the attributes. */
9874 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9876 out_attr = elf_known_obj_attributes_proc (obfd);
9878 /* Use the Tag_null value to indicate the attributes have been
9882 /* We do not output objects with Tag_MPextension_use_legacy - we move
9883 the attribute's value to Tag_MPextension_use. */
9884 if (out_attr[Tag_MPextension_use_legacy].i != 0)
9886 if (out_attr[Tag_MPextension_use].i != 0
9887 && out_attr[Tag_MPextension_use_legacy].i
9888 != out_attr[Tag_MPextension_use].i)
9891 (_("Error: %B has both the current and legacy "
9892 "Tag_MPextension_use attributes"), ibfd);
9896 out_attr[Tag_MPextension_use] =
9897 out_attr[Tag_MPextension_use_legacy];
9898 out_attr[Tag_MPextension_use_legacy].type = 0;
9899 out_attr[Tag_MPextension_use_legacy].i = 0;
9905 in_attr = elf_known_obj_attributes_proc (ibfd);
9906 out_attr = elf_known_obj_attributes_proc (obfd);
9907 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9908 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9910 /* Ignore mismatches if the object doesn't use floating point. */
9911 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9912 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9913 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9916 (_("error: %B uses VFP register arguments, %B does not"),
9917 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
9918 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
9923 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9925 /* Merge this attribute with existing attributes. */
9928 case Tag_CPU_raw_name:
9930 /* These are merged after Tag_CPU_arch. */
9933 case Tag_ABI_optimization_goals:
9934 case Tag_ABI_FP_optimization_goals:
9935 /* Use the first value seen. */
9940 int secondary_compat = -1, secondary_compat_out = -1;
9941 unsigned int saved_out_attr = out_attr[i].i;
9942 static const char *name_table[] = {
9943 /* These aren't real CPU names, but we can't guess
9944 that from the architecture version alone. */
9960 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9961 secondary_compat = get_secondary_compatible_arch (ibfd);
9962 secondary_compat_out = get_secondary_compatible_arch (obfd);
9963 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9964 &secondary_compat_out,
9967 set_secondary_compatible_arch (obfd, secondary_compat_out);
9969 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9970 if (out_attr[i].i == saved_out_attr)
9971 ; /* Leave the names alone. */
9972 else if (out_attr[i].i == in_attr[i].i)
9974 /* The output architecture has been changed to match the
9975 input architecture. Use the input names. */
9976 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9977 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9979 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9980 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9985 out_attr[Tag_CPU_name].s = NULL;
9986 out_attr[Tag_CPU_raw_name].s = NULL;
9989 /* If we still don't have a value for Tag_CPU_name,
9990 make one up now. Tag_CPU_raw_name remains blank. */
9991 if (out_attr[Tag_CPU_name].s == NULL
9992 && out_attr[i].i < ARRAY_SIZE (name_table))
9993 out_attr[Tag_CPU_name].s =
9994 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9998 case Tag_ARM_ISA_use:
9999 case Tag_THUMB_ISA_use:
10000 case Tag_WMMX_arch:
10001 case Tag_Advanced_SIMD_arch:
10002 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
10003 case Tag_ABI_FP_rounding:
10004 case Tag_ABI_FP_exceptions:
10005 case Tag_ABI_FP_user_exceptions:
10006 case Tag_ABI_FP_number_model:
10007 case Tag_FP_HP_extension:
10008 case Tag_CPU_unaligned_access:
10010 case Tag_MPextension_use:
10011 /* Use the largest value specified. */
10012 if (in_attr[i].i > out_attr[i].i)
10013 out_attr[i].i = in_attr[i].i;
10016 case Tag_ABI_align_preserved:
10017 case Tag_ABI_PCS_RO_data:
10018 /* Use the smallest value specified. */
10019 if (in_attr[i].i < out_attr[i].i)
10020 out_attr[i].i = in_attr[i].i;
10023 case Tag_ABI_align_needed:
10024 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
10025 && (in_attr[Tag_ABI_align_preserved].i == 0
10026 || out_attr[Tag_ABI_align_preserved].i == 0))
10028 /* This error message should be enabled once all non-conformant
10029 binaries in the toolchain have had the attributes set
10032 (_("error: %B: 8-byte data alignment conflicts with %B"),
10036 /* Fall through. */
10037 case Tag_ABI_FP_denormal:
10038 case Tag_ABI_PCS_GOT_use:
10039 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
10040 value if greater than 2 (for future-proofing). */
10041 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
10042 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
10043 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
10044 out_attr[i].i = in_attr[i].i;
10047 case Tag_Virtualization_use:
10048 /* The virtualization tag effectively stores two bits of
10049 information: the intended use of TrustZone (in bit 0), and the
10050 intended use of Virtualization (in bit 1). */
10051 if (out_attr[i].i == 0)
10052 out_attr[i].i = in_attr[i].i;
10053 else if (in_attr[i].i != 0
10054 && in_attr[i].i != out_attr[i].i)
10056 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
10061 (_("error: %B: unable to merge virtualization attributes "
10069 case Tag_CPU_arch_profile:
10070 if (out_attr[i].i != in_attr[i].i)
10072 /* 0 will merge with anything.
10073 'A' and 'S' merge to 'A'.
10074 'R' and 'S' merge to 'R'.
10075 'M' and 'A|R|S' is an error. */
10076 if (out_attr[i].i == 0
10077 || (out_attr[i].i == 'S'
10078 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
10079 out_attr[i].i = in_attr[i].i;
10080 else if (in_attr[i].i == 0
10081 || (in_attr[i].i == 'S'
10082 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
10083 ; /* Do nothing. */
10087 (_("error: %B: Conflicting architecture profiles %c/%c"),
10089 in_attr[i].i ? in_attr[i].i : '0',
10090 out_attr[i].i ? out_attr[i].i : '0');
10097 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
10098 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
10099 when it's 0. It might mean absence of FP hardware if
10100 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
10102 static const struct
10106 } vfp_versions[7] =
10120 /* If the output has no requirement about FP hardware,
10121 follow the requirement of the input. */
10122 if (out_attr[i].i == 0)
10124 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
10125 out_attr[i].i = in_attr[i].i;
10126 out_attr[Tag_ABI_HardFP_use].i
10127 = in_attr[Tag_ABI_HardFP_use].i;
10130 /* If the input has no requirement about FP hardware, do
10132 else if (in_attr[i].i == 0)
10134 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
10138 /* Both the input and the output have nonzero Tag_FP_arch.
10139 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
10141 /* If both the input and the output have zero Tag_ABI_HardFP_use,
10143 if (in_attr[Tag_ABI_HardFP_use].i == 0
10144 && out_attr[Tag_ABI_HardFP_use].i == 0)
10146 /* If the input and the output have different Tag_ABI_HardFP_use,
10147 the combination of them is 3 (SP & DP). */
10148 else if (in_attr[Tag_ABI_HardFP_use].i
10149 != out_attr[Tag_ABI_HardFP_use].i)
10150 out_attr[Tag_ABI_HardFP_use].i = 3;
10152 /* Now we can handle Tag_FP_arch. */
10154 /* Values greater than 6 aren't defined, so just pick the
10156 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
10158 out_attr[i] = in_attr[i];
10161 /* The output uses the superset of input features
10162 (ISA version) and registers. */
10163 ver = vfp_versions[in_attr[i].i].ver;
10164 if (ver < vfp_versions[out_attr[i].i].ver)
10165 ver = vfp_versions[out_attr[i].i].ver;
10166 regs = vfp_versions[in_attr[i].i].regs;
10167 if (regs < vfp_versions[out_attr[i].i].regs)
10168 regs = vfp_versions[out_attr[i].i].regs;
10169 /* This assumes all possible supersets are also a valid
10171 for (newval = 6; newval > 0; newval--)
10173 if (regs == vfp_versions[newval].regs
10174 && ver == vfp_versions[newval].ver)
10177 out_attr[i].i = newval;
10180 case Tag_PCS_config:
10181 if (out_attr[i].i == 0)
10182 out_attr[i].i = in_attr[i].i;
10183 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
10185 /* It's sometimes ok to mix different configs, so this is only
10188 (_("Warning: %B: Conflicting platform configuration"), ibfd);
10191 case Tag_ABI_PCS_R9_use:
10192 if (in_attr[i].i != out_attr[i].i
10193 && out_attr[i].i != AEABI_R9_unused
10194 && in_attr[i].i != AEABI_R9_unused)
10197 (_("error: %B: Conflicting use of R9"), ibfd);
10200 if (out_attr[i].i == AEABI_R9_unused)
10201 out_attr[i].i = in_attr[i].i;
10203 case Tag_ABI_PCS_RW_data:
10204 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
10205 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
10206 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
10209 (_("error: %B: SB relative addressing conflicts with use of R9"),
10213 /* Use the smallest value specified. */
10214 if (in_attr[i].i < out_attr[i].i)
10215 out_attr[i].i = in_attr[i].i;
10217 case Tag_ABI_PCS_wchar_t:
10218 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10219 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10222 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10223 ibfd, in_attr[i].i, out_attr[i].i);
10225 else if (in_attr[i].i && !out_attr[i].i)
10226 out_attr[i].i = in_attr[i].i;
10228 case Tag_ABI_enum_size:
10229 if (in_attr[i].i != AEABI_enum_unused)
10231 if (out_attr[i].i == AEABI_enum_unused
10232 || out_attr[i].i == AEABI_enum_forced_wide)
10234 /* The existing object is compatible with anything.
10235 Use whatever requirements the new object has. */
10236 out_attr[i].i = in_attr[i].i;
10238 else if (in_attr[i].i != AEABI_enum_forced_wide
10239 && out_attr[i].i != in_attr[i].i
10240 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10242 static const char *aeabi_enum_names[] =
10243 { "", "variable-size", "32-bit", "" };
10244 const char *in_name =
10245 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10246 ? aeabi_enum_names[in_attr[i].i]
10248 const char *out_name =
10249 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10250 ? aeabi_enum_names[out_attr[i].i]
10253 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10254 ibfd, in_name, out_name);
10258 case Tag_ABI_VFP_args:
10261 case Tag_ABI_WMMX_args:
10262 if (in_attr[i].i != out_attr[i].i)
10265 (_("error: %B uses iWMMXt register arguments, %B does not"),
10270 case Tag_compatibility:
10271 /* Merged in target-independent code. */
10273 case Tag_ABI_HardFP_use:
10274 /* This is handled along with Tag_FP_arch. */
10276 case Tag_ABI_FP_16bit_format:
10277 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10279 if (in_attr[i].i != out_attr[i].i)
10282 (_("error: fp16 format mismatch between %B and %B"),
10287 if (in_attr[i].i != 0)
10288 out_attr[i].i = in_attr[i].i;
10292 /* This tag is set to zero if we can use UDIV and SDIV in Thumb
10293 mode on a v7-M or v7-R CPU; to one if we cannot use UDIV or
10294 SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
10295 CPU. We will merge as follows: If the input attribute's value
10296 is one then the output attribute's value remains unchanged. If
10297 the input attribute's value is zero or two then if the output
10298 attribute's value is one the output value is set to the input
10299 value, otherwise the output value must be the same as the
10301 if (in_attr[i].i != 1 && out_attr[i].i != 1)
10303 if (in_attr[i].i != out_attr[i].i)
10306 (_("DIV usage mismatch between %B and %B"),
10312 if (in_attr[i].i != 1)
10313 out_attr[i].i = in_attr[i].i;
10317 case Tag_MPextension_use_legacy:
10318 /* We don't output objects with Tag_MPextension_use_legacy - we
10319 move the value to Tag_MPextension_use. */
10320 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
10322 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
10325 (_("%B has both the current and legacy "
10326 "Tag_MPextension_use attributes"),
10332 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
10333 out_attr[Tag_MPextension_use] = in_attr[i];
10337 case Tag_nodefaults:
10338 /* This tag is set if it exists, but the value is unused (and is
10339 typically zero). We don't actually need to do anything here -
10340 the merge happens automatically when the type flags are merged
10343 case Tag_also_compatible_with:
10344 /* Already done in Tag_CPU_arch. */
10346 case Tag_conformance:
10347 /* Keep the attribute if it matches. Throw it away otherwise.
10348 No attribute means no claim to conform. */
10349 if (!in_attr[i].s || !out_attr[i].s
10350 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10351 out_attr[i].s = NULL;
10356 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
10359 /* If out_attr was copied from in_attr then it won't have a type yet. */
10360 if (in_attr[i].type && !out_attr[i].type)
10361 out_attr[i].type = in_attr[i].type;
10364 /* Merge Tag_compatibility attributes and any common GNU ones. */
10365 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
10368 /* Check for any attributes not known on ARM. */
10369 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
10375 /* Return TRUE if the two EABI versions are compatible. */
10378 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10380 /* v4 and v5 are the same spec before and after it was released,
10381 so allow mixing them. */
10382 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10383 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10386 return (iver == over);
10389 /* Merge backend specific data from an object file to the output
10390 object file when linking. */
10393 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10395 /* Display the flags field. */
10398 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10400 FILE * file = (FILE *) ptr;
10401 unsigned long flags;
10403 BFD_ASSERT (abfd != NULL && ptr != NULL);
10405 /* Print normal ELF private data. */
10406 _bfd_elf_print_private_bfd_data (abfd, ptr);
10408 flags = elf_elfheader (abfd)->e_flags;
10409 /* Ignore init flag - it may not be set, despite the flags field
10410 containing valid data. */
10412 /* xgettext:c-format */
10413 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10415 switch (EF_ARM_EABI_VERSION (flags))
10417 case EF_ARM_EABI_UNKNOWN:
10418 /* The following flag bits are GNU extensions and not part of the
10419 official ARM ELF extended ABI. Hence they are only decoded if
10420 the EABI version is not set. */
10421 if (flags & EF_ARM_INTERWORK)
10422 fprintf (file, _(" [interworking enabled]"));
10424 if (flags & EF_ARM_APCS_26)
10425 fprintf (file, " [APCS-26]");
10427 fprintf (file, " [APCS-32]");
10429 if (flags & EF_ARM_VFP_FLOAT)
10430 fprintf (file, _(" [VFP float format]"));
10431 else if (flags & EF_ARM_MAVERICK_FLOAT)
10432 fprintf (file, _(" [Maverick float format]"));
10434 fprintf (file, _(" [FPA float format]"));
10436 if (flags & EF_ARM_APCS_FLOAT)
10437 fprintf (file, _(" [floats passed in float registers]"));
10439 if (flags & EF_ARM_PIC)
10440 fprintf (file, _(" [position independent]"));
10442 if (flags & EF_ARM_NEW_ABI)
10443 fprintf (file, _(" [new ABI]"));
10445 if (flags & EF_ARM_OLD_ABI)
10446 fprintf (file, _(" [old ABI]"));
10448 if (flags & EF_ARM_SOFT_FLOAT)
10449 fprintf (file, _(" [software FP]"));
10451 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10452 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10453 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10454 | EF_ARM_MAVERICK_FLOAT);
10457 case EF_ARM_EABI_VER1:
10458 fprintf (file, _(" [Version1 EABI]"));
10460 if (flags & EF_ARM_SYMSARESORTED)
10461 fprintf (file, _(" [sorted symbol table]"));
10463 fprintf (file, _(" [unsorted symbol table]"));
10465 flags &= ~ EF_ARM_SYMSARESORTED;
10468 case EF_ARM_EABI_VER2:
10469 fprintf (file, _(" [Version2 EABI]"));
10471 if (flags & EF_ARM_SYMSARESORTED)
10472 fprintf (file, _(" [sorted symbol table]"));
10474 fprintf (file, _(" [unsorted symbol table]"));
10476 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10477 fprintf (file, _(" [dynamic symbols use segment index]"));
10479 if (flags & EF_ARM_MAPSYMSFIRST)
10480 fprintf (file, _(" [mapping symbols precede others]"));
10482 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10483 | EF_ARM_MAPSYMSFIRST);
10486 case EF_ARM_EABI_VER3:
10487 fprintf (file, _(" [Version3 EABI]"));
10490 case EF_ARM_EABI_VER4:
10491 fprintf (file, _(" [Version4 EABI]"));
10494 case EF_ARM_EABI_VER5:
10495 fprintf (file, _(" [Version5 EABI]"));
10497 if (flags & EF_ARM_BE8)
10498 fprintf (file, _(" [BE8]"));
10500 if (flags & EF_ARM_LE8)
10501 fprintf (file, _(" [LE8]"));
10503 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10507 fprintf (file, _(" <EABI version unrecognised>"));
10511 flags &= ~ EF_ARM_EABIMASK;
10513 if (flags & EF_ARM_RELEXEC)
10514 fprintf (file, _(" [relocatable executable]"));
10516 if (flags & EF_ARM_HASENTRY)
10517 fprintf (file, _(" [has entry point]"));
10519 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10522 fprintf (file, _("<Unrecognised flag bits set>"));
10524 fputc ('\n', file);
10530 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10532 switch (ELF_ST_TYPE (elf_sym->st_info))
10534 case STT_ARM_TFUNC:
10535 return ELF_ST_TYPE (elf_sym->st_info);
10537 case STT_ARM_16BIT:
10538 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10539 This allows us to distinguish between data used by Thumb instructions
10540 and non-data (which is probably code) inside Thumb regions of an
10542 if (type != STT_OBJECT && type != STT_TLS)
10543 return ELF_ST_TYPE (elf_sym->st_info);
10554 elf32_arm_gc_mark_hook (asection *sec,
10555 struct bfd_link_info *info,
10556 Elf_Internal_Rela *rel,
10557 struct elf_link_hash_entry *h,
10558 Elf_Internal_Sym *sym)
10561 switch (ELF32_R_TYPE (rel->r_info))
10563 case R_ARM_GNU_VTINHERIT:
10564 case R_ARM_GNU_VTENTRY:
10568 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10571 /* Update the got entry reference counts for the section being removed. */
10574 elf32_arm_gc_sweep_hook (bfd * abfd,
10575 struct bfd_link_info * info,
10577 const Elf_Internal_Rela * relocs)
10579 Elf_Internal_Shdr *symtab_hdr;
10580 struct elf_link_hash_entry **sym_hashes;
10581 bfd_signed_vma *local_got_refcounts;
10582 const Elf_Internal_Rela *rel, *relend;
10583 struct elf32_arm_link_hash_table * globals;
10585 if (info->relocatable)
10588 globals = elf32_arm_hash_table (info);
10589 if (globals == NULL)
10592 elf_section_data (sec)->local_dynrel = NULL;
10594 symtab_hdr = & elf_symtab_hdr (abfd);
10595 sym_hashes = elf_sym_hashes (abfd);
10596 local_got_refcounts = elf_local_got_refcounts (abfd);
10598 check_use_blx (globals);
10600 relend = relocs + sec->reloc_count;
10601 for (rel = relocs; rel < relend; rel++)
10603 unsigned long r_symndx;
10604 struct elf_link_hash_entry *h = NULL;
10607 r_symndx = ELF32_R_SYM (rel->r_info);
10608 if (r_symndx >= symtab_hdr->sh_info)
10610 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10611 while (h->root.type == bfd_link_hash_indirect
10612 || h->root.type == bfd_link_hash_warning)
10613 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10616 r_type = ELF32_R_TYPE (rel->r_info);
10617 r_type = arm_real_reloc_type (globals, r_type);
10621 case R_ARM_GOT_PREL:
10622 case R_ARM_TLS_GD32:
10623 case R_ARM_TLS_IE32:
10626 if (h->got.refcount > 0)
10627 h->got.refcount -= 1;
10629 else if (local_got_refcounts != NULL)
10631 if (local_got_refcounts[r_symndx] > 0)
10632 local_got_refcounts[r_symndx] -= 1;
10636 case R_ARM_TLS_LDM32:
10637 globals->tls_ldm_got.refcount -= 1;
10641 case R_ARM_ABS32_NOI:
10643 case R_ARM_REL32_NOI:
10649 case R_ARM_THM_CALL:
10650 case R_ARM_THM_JUMP24:
10651 case R_ARM_THM_JUMP19:
10652 case R_ARM_MOVW_ABS_NC:
10653 case R_ARM_MOVT_ABS:
10654 case R_ARM_MOVW_PREL_NC:
10655 case R_ARM_MOVT_PREL:
10656 case R_ARM_THM_MOVW_ABS_NC:
10657 case R_ARM_THM_MOVT_ABS:
10658 case R_ARM_THM_MOVW_PREL_NC:
10659 case R_ARM_THM_MOVT_PREL:
10660 /* Should the interworking branches be here also? */
10664 struct elf32_arm_link_hash_entry *eh;
10665 struct elf_dyn_relocs **pp;
10666 struct elf_dyn_relocs *p;
10668 eh = (struct elf32_arm_link_hash_entry *) h;
10670 if (h->plt.refcount > 0)
10672 h->plt.refcount -= 1;
10673 if (r_type == R_ARM_THM_CALL)
10674 eh->plt_maybe_thumb_refcount--;
10676 if (r_type == R_ARM_THM_JUMP24
10677 || r_type == R_ARM_THM_JUMP19)
10678 eh->plt_thumb_refcount--;
10681 if (r_type == R_ARM_ABS32
10682 || r_type == R_ARM_REL32
10683 || r_type == R_ARM_ABS32_NOI
10684 || r_type == R_ARM_REL32_NOI)
10685 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
10689 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10690 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10707 /* Look through the relocs for a section during the first phase. */
10710 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10711 asection *sec, const Elf_Internal_Rela *relocs)
10713 Elf_Internal_Shdr *symtab_hdr;
10714 struct elf_link_hash_entry **sym_hashes;
10715 const Elf_Internal_Rela *rel;
10716 const Elf_Internal_Rela *rel_end;
10719 struct elf32_arm_link_hash_table *htab;
10720 bfd_boolean needs_plt;
10721 unsigned long nsyms;
10723 if (info->relocatable)
10726 BFD_ASSERT (is_arm_elf (abfd));
10728 htab = elf32_arm_hash_table (info);
10734 /* Create dynamic sections for relocatable executables so that we can
10735 copy relocations. */
10736 if (htab->root.is_relocatable_executable
10737 && ! htab->root.dynamic_sections_created)
10739 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10743 dynobj = elf_hash_table (info)->dynobj;
10744 symtab_hdr = & elf_symtab_hdr (abfd);
10745 sym_hashes = elf_sym_hashes (abfd);
10746 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10748 rel_end = relocs + sec->reloc_count;
10749 for (rel = relocs; rel < rel_end; rel++)
10751 struct elf_link_hash_entry *h;
10752 struct elf32_arm_link_hash_entry *eh;
10753 unsigned long r_symndx;
10756 r_symndx = ELF32_R_SYM (rel->r_info);
10757 r_type = ELF32_R_TYPE (rel->r_info);
10758 r_type = arm_real_reloc_type (htab, r_type);
10760 if (r_symndx >= nsyms
10761 /* PR 9934: It is possible to have relocations that do not
10762 refer to symbols, thus it is also possible to have an
10763 object file containing relocations but no symbol table. */
10764 && (r_symndx > STN_UNDEF || nsyms > 0))
10766 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10771 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10775 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10776 while (h->root.type == bfd_link_hash_indirect
10777 || h->root.type == bfd_link_hash_warning)
10778 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10781 eh = (struct elf32_arm_link_hash_entry *) h;
10786 case R_ARM_GOT_PREL:
10787 case R_ARM_TLS_GD32:
10788 case R_ARM_TLS_IE32:
10789 /* This symbol requires a global offset table entry. */
10791 int tls_type, old_tls_type;
10795 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10796 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10797 default: tls_type = GOT_NORMAL; break;
10803 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10807 bfd_signed_vma *local_got_refcounts;
10809 /* This is a global offset table entry for a local symbol. */
10810 local_got_refcounts = elf_local_got_refcounts (abfd);
10811 if (local_got_refcounts == NULL)
10813 bfd_size_type size;
10815 size = symtab_hdr->sh_info;
10816 size *= (sizeof (bfd_signed_vma) + sizeof (char));
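/* Allocate one refcount plus one TLS-type byte per local symbol; the
   TLS-type bytes live immediately after the refcount array in the
   same allocation.  */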
10817 local_got_refcounts = (bfd_signed_vma *)
10818 bfd_zalloc (abfd, size);
10819 if (local_got_refcounts == NULL)
10821 elf_local_got_refcounts (abfd) = local_got_refcounts;
10822 elf32_arm_local_got_tls_type (abfd)
10823 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10825 local_got_refcounts[r_symndx] += 1;
10826 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10829 /* We will already have issued an error message if there is a
10830 TLS / non-TLS mismatch, based on the symbol type. We don't
10831 support any linker relaxations. So just combine any TLS
10833 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10834 && tls_type != GOT_NORMAL)
10835 tls_type |= old_tls_type;
10837 if (old_tls_type != tls_type)
10840 elf32_arm_hash_entry (h)->tls_type = tls_type;
10842 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10845 /* Fall through. */
10847 case R_ARM_TLS_LDM32:
10848 if (r_type == R_ARM_TLS_LDM32)
10849 htab->tls_ldm_got.refcount++;
10850 /* Fall through. */
10852 case R_ARM_GOTOFF32:
10854 if (htab->root.sgot == NULL)
10856 if (htab->root.dynobj == NULL)
10857 htab->root.dynobj = abfd;
10858 if (!create_got_section (htab->root.dynobj, info))
10864 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10865 ldr __GOTT_INDEX__ offsets. */
10866 if (!htab->vxworks_p)
10868 /* Fall through. */
10875 case R_ARM_THM_CALL:
10876 case R_ARM_THM_JUMP24:
10877 case R_ARM_THM_JUMP19:
10881 case R_ARM_MOVW_ABS_NC:
10882 case R_ARM_MOVT_ABS:
10883 case R_ARM_THM_MOVW_ABS_NC:
10884 case R_ARM_THM_MOVT_ABS:
10887 (*_bfd_error_handler)
10888 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10889 abfd, elf32_arm_howto_table_1[r_type].name,
10890 (h) ? h->root.root.string : "a local symbol");
10891 bfd_set_error (bfd_error_bad_value);
10895 /* Fall through. */
10897 case R_ARM_ABS32_NOI:
10899 case R_ARM_REL32_NOI:
10900 case R_ARM_MOVW_PREL_NC:
10901 case R_ARM_MOVT_PREL:
10902 case R_ARM_THM_MOVW_PREL_NC:
10903 case R_ARM_THM_MOVT_PREL:
10907 /* Should the interworking branches be listed here? */
10910 /* If this reloc is in a read-only section, we might
10911 need a copy reloc. We can't check reliably at this
10912 stage whether the section is read-only, as input
10913 sections have not yet been mapped to output sections.
10914 Tentatively set the flag for now, and correct in
10915 adjust_dynamic_symbol. */
10917 h->non_got_ref = 1;
10919 /* We may need a .plt entry if the function this reloc
10920 refers to is in a different object. We can't tell for
10921 sure yet, because something later might force the
10926 /* If we create a PLT entry, this relocation will reference
10927 it, even if it's an ABS32 relocation. */
10928 h->plt.refcount += 1;
10930 /* It's too early to use htab->use_blx here, so we have to
10931 record possible blx references separately from
10932 relocs that definitely need a thumb stub. */
10934 if (r_type == R_ARM_THM_CALL)
10935 eh->plt_maybe_thumb_refcount += 1;
10937 if (r_type == R_ARM_THM_JUMP24
10938 || r_type == R_ARM_THM_JUMP19)
10939 eh->plt_thumb_refcount += 1;
10942 /* If we are creating a shared library or relocatable executable,
10943 and this is a reloc against a global symbol, or a non PC
10944 relative reloc against a local symbol, then we need to copy
10945 the reloc into the shared library. However, if we are linking
10946 with -Bsymbolic, we do not need to copy a reloc against a
10947 global symbol which is defined in an object we are
10948 including in the link (i.e., DEF_REGULAR is set). At
10949 this point we have not seen all the input files, so it is
10950 possible that DEF_REGULAR is not set now but will be set
10951 later (it is never cleared). We account for that
10952 possibility below by storing information in the
10953 dyn_relocs field of the hash table entry. */
10954 if ((info->shared || htab->root.is_relocatable_executable)
10955 && (sec->flags & SEC_ALLOC) != 0
10956 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10957 || (h != NULL && ! h->needs_plt
10958 && (! info->symbolic || ! h->def_regular))))
10960 struct elf_dyn_relocs *p, **head;
10962 /* When creating a shared object, we must copy these
10963 reloc types into the output file. We create a reloc
10964 section in dynobj and make room for this reloc. */
10965 if (sreloc == NULL)
10967 sreloc = _bfd_elf_make_dynamic_reloc_section
10968 (sec, dynobj, 2, abfd, ! htab->use_rel);
10970 if (sreloc == NULL)
10973 /* BPABI objects never have dynamic relocations mapped. */
10974 if (htab->symbian_p)
10978 flags = bfd_get_section_flags (dynobj, sreloc);
10979 flags &= ~(SEC_LOAD | SEC_ALLOC);
10980 bfd_set_section_flags (dynobj, sreloc, flags);
10984 /* If this is a global symbol, we count the number of
10985 relocations we need for this symbol. */
10988 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
10992 /* Track dynamic relocs needed for local syms too.
10993 We really need local syms available to do this
10994 easily. Oh well. */
10997 Elf_Internal_Sym *isym;
10999 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
11004 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
11008 vpp = &elf_section_data (s)->local_dynrel;
11009 head = (struct elf_dyn_relocs **) vpp;
11013 if (p == NULL || p->sec != sec)
11015 bfd_size_type amt = sizeof *p;
11017 p = (struct elf_dyn_relocs *)
11018 bfd_alloc (htab->root.dynobj, amt);
11028 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
11034 /* This relocation describes the C++ object vtable hierarchy.
11035 Reconstruct it for later use during GC. */
11036 case R_ARM_GNU_VTINHERIT:
11037 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
11041 /* This relocation describes which C++ vtable entries are actually
11042 used. Record for later use during GC. */
11043 case R_ARM_GNU_VTENTRY:
11044 BFD_ASSERT (h != NULL);
11046 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
11055 /* Unwinding tables are not referenced directly. This pass marks them as
11056 required if the corresponding code section is marked. */
11059 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
11060 elf_gc_mark_hook_fn gc_mark_hook)
11063 Elf_Internal_Shdr **elf_shdrp;
11066 /* Marking EH data may cause additional code sections to be marked,
11067 requiring multiple passes. */
11072 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11076 if (! is_arm_elf (sub))
11079 elf_shdrp = elf_elfsections (sub);
11080 for (o = sub->sections; o != NULL; o = o->next)
11082 Elf_Internal_Shdr *hdr;
11084 hdr = &elf_section_data (o)->this_hdr;
11085 if (hdr->sh_type == SHT_ARM_EXIDX
11087 && hdr->sh_link < elf_numsections (sub)
11089 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11092 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11102 /* Treat mapping symbols as special target symbols. */
11105 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11107 return bfd_is_arm_special_symbol_name (sym->name,
11108 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11111 /* This is a copy of elf_find_function() from elf.c except that
11112 ARM mapping symbols are ignored when looking for function names
11113 and STT_ARM_TFUNC is considered to be a function type. */
11116 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11117 asection * section,
11118 asymbol ** symbols,
11120 const char ** filename_ptr,
11121 const char ** functionname_ptr)
11123 const char * filename = NULL;
11124 asymbol * func = NULL;
11125 bfd_vma low_func = 0;
11128 for (p = symbols; *p != NULL; p++)
11130 elf_symbol_type *q;
11132 q = (elf_symbol_type *) *p;
11134 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11139 filename = bfd_asymbol_name (&q->symbol);
11142 case STT_ARM_TFUNC:
11144 /* Skip mapping symbols. */
11145 if ((q->symbol.flags & BSF_LOCAL)
11146 && bfd_is_arm_special_symbol_name (q->symbol.name,
11147 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11149 /* Fall through. */
11150 if (bfd_get_section (&q->symbol) == section
11151 && q->symbol.value >= low_func
11152 && q->symbol.value <= offset)
11154 func = (asymbol *) q;
11155 low_func = q->symbol.value;
11165 *filename_ptr = filename;
11166 if (functionname_ptr)
11167 *functionname_ptr = bfd_asymbol_name (func);
11173 /* Find the nearest line to a particular section and offset, for error
11174 reporting. This code is a duplicate of the code in elf.c, except
11175 that it uses arm_elf_find_function. */
11178 elf32_arm_find_nearest_line (bfd * abfd,
11179 asection * section,
11180 asymbol ** symbols,
11182 const char ** filename_ptr,
11183 const char ** functionname_ptr,
11184 unsigned int * line_ptr)
11186 bfd_boolean found = FALSE;
11188 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11190 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11191 filename_ptr, functionname_ptr,
11193 & elf_tdata (abfd)->dwarf2_find_line_info))
11195 if (!*functionname_ptr)
11196 arm_elf_find_function (abfd, section, symbols, offset,
11197 *filename_ptr ? NULL : filename_ptr,
11203 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11204 & found, filename_ptr,
11205 functionname_ptr, line_ptr,
11206 & elf_tdata (abfd)->line_info))
11209 if (found && (*functionname_ptr || *line_ptr))
11212 if (symbols == NULL)
11215 if (! arm_elf_find_function (abfd, section, symbols, offset,
11216 filename_ptr, functionname_ptr))
11224 elf32_arm_find_inliner_info (bfd * abfd,
11225 const char ** filename_ptr,
11226 const char ** functionname_ptr,
11227 unsigned int * line_ptr)
11230 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11231 functionname_ptr, line_ptr,
11232 & elf_tdata (abfd)->dwarf2_find_line_info);
11236 /* Adjust a symbol defined by a dynamic object and referenced by a
11237 regular object. The current definition is in some section of the
11238 dynamic object, but we're not including those sections. We have to
11239 change the definition to something the rest of the link can
11243 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11244 struct elf_link_hash_entry * h)
11248 struct elf32_arm_link_hash_entry * eh;
11249 struct elf32_arm_link_hash_table *globals;
11251 globals = elf32_arm_hash_table (info);
11252 if (globals == NULL)
11255 dynobj = elf_hash_table (info)->dynobj;
11257 /* Make sure we know what is going on here. */
11258 BFD_ASSERT (dynobj != NULL
11260 || h->u.weakdef != NULL
11263 && !h->def_regular)));
11265 eh = (struct elf32_arm_link_hash_entry *) h;
11267 /* If this is a function, put it in the procedure linkage table. We
11268 will fill in the contents of the procedure linkage table later,
11269 when we know the address of the .got section. */
11270 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11273 if (h->plt.refcount <= 0
11274 || SYMBOL_CALLS_LOCAL (info, h)
11275 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11276 && h->root.type == bfd_link_hash_undefweak))
11278 /* This case can occur if we saw a PLT32 reloc in an input
11279 file, but the symbol was never referred to by a dynamic
11280 object, or if all references were garbage collected. In
11281 such a case, we don't actually need to build a procedure
11282 linkage table, and we can just do a PC24 reloc instead. */
11283 h->plt.offset = (bfd_vma) -1;
11284 eh->plt_thumb_refcount = 0;
11285 eh->plt_maybe_thumb_refcount = 0;
11293 /* It's possible that we incorrectly decided a .plt reloc was
11294 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11295 in check_relocs. We can't decide accurately between function
11296 and non-function syms in check_relocs; objects loaded later in
11297 the link may change h->type. So fix it now. */
11298 h->plt.offset = (bfd_vma) -1;
11299 eh->plt_thumb_refcount = 0;
11300 eh->plt_maybe_thumb_refcount = 0;
11303 /* If this is a weak symbol, and there is a real definition, the
11304 processor independent code will have arranged for us to see the
11305 real definition first, and we can just use the same value. */
11306 if (h->u.weakdef != NULL)
11308 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11309 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11310 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11311 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11315 /* If there are no non-GOT references, we do not need a copy
11317 if (!h->non_got_ref)
11320 /* This is a reference to a symbol defined by a dynamic object which
11321 is not a function. */
11323 /* If we are creating a shared library, we must presume that the
11324 only references to the symbol are via the global offset table.
11325 For such cases we need not do anything here; the relocations will
11326 be handled correctly by relocate_section. Relocatable executables
11327 can reference data in shared objects directly, so we don't need to
11328 do anything here. */
11329 if (info->shared || globals->root.is_relocatable_executable)
11334 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11335 h->root.root.string);
11339 /* We must allocate the symbol in our .dynbss section, which will
11340 become part of the .bss section of the executable. There will be
11341 an entry for this symbol in the .dynsym section. The dynamic
11342 object will contain position independent code, so all references
11343 from the dynamic object to this symbol will go through the global
11344 offset table. The dynamic linker will use the .dynsym entry to
11345 determine the address it must put in the global offset table, so
11346 both the dynamic object and the regular object will refer to the
11347 same memory location for the variable. */
11348 s = bfd_get_section_by_name (dynobj, ".dynbss");
11349 BFD_ASSERT (s != NULL);
11351 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11352 copy the initial value out of the dynamic object and into the
11353 runtime process image. We need to remember the offset into the
11354 .rel(a).bss section we are going to use. */
11355 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11359 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11360 BFD_ASSERT (srel != NULL);
11361 srel->size += RELOC_SIZE (globals);
11365 return _bfd_elf_adjust_dynamic_copy (h, s);
11368 /* Allocate space in .plt, .got and associated reloc sections for
11372 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11374 struct bfd_link_info *info;
11375 struct elf32_arm_link_hash_table *htab;
11376 struct elf32_arm_link_hash_entry *eh;
11377 struct elf_dyn_relocs *p;
11378 bfd_signed_vma thumb_refs;
11380 eh = (struct elf32_arm_link_hash_entry *) h;
11382 if (h->root.type == bfd_link_hash_indirect)
11385 if (h->root.type == bfd_link_hash_warning)
11386 /* When warning symbols are created, they **replace** the "real"
11387 entry in the hash table, thus we never get to see the real
11388 symbol in a hash traversal. So look at it now. */
11389 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11391 info = (struct bfd_link_info *) inf;
11392 htab = elf32_arm_hash_table (info);
11396 if (htab->root.dynamic_sections_created
11397 && h->plt.refcount > 0)
11399 /* Make sure this symbol is output as a dynamic symbol.
11400 Undefined weak syms won't yet be marked as dynamic. */
11401 if (h->dynindx == -1
11402 && !h->forced_local)
11404 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11409 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11411 asection *s = htab->root.splt;
11413 /* If this is the first .plt entry, make room for the special
11416 s->size += htab->plt_header_size;
11418 h->plt.offset = s->size;
11420 /* If we will insert a Thumb trampoline before this PLT, leave room
11422 thumb_refs = eh->plt_thumb_refcount;
11423 if (!htab->use_blx)
11424 thumb_refs += eh->plt_maybe_thumb_refcount;
11426 if (thumb_refs > 0)
11428 h->plt.offset += PLT_THUMB_STUB_SIZE;
11429 s->size += PLT_THUMB_STUB_SIZE;
11432 /* If this symbol is not defined in a regular file, and we are
11433 not generating a shared library, then set the symbol to this
11434 location in the .plt. This is required to make function
11435 pointers compare as equal between the normal executable and
11436 the shared library. */
11438 && !h->def_regular)
11440 h->root.u.def.section = s;
11441 h->root.u.def.value = h->plt.offset;
11443 /* Make sure the function is not marked as Thumb, in case
11444 it is the target of an ABS32 relocation, which will
11445 point to the PLT entry. */
11446 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11447 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11450 /* Make room for this entry. */
11451 s->size += htab->plt_entry_size;
11453 if (!htab->symbian_p)
11455 /* We also need to make an entry in the .got.plt section, which
11456 will be placed in the .got section by the linker script. */
11457 eh->plt_got_offset = htab->root.sgotplt->size;
11458 htab->root.sgotplt->size += 4;
11461 /* We also need to make an entry in the .rel(a).plt section. */
11462 htab->root.srelplt->size += RELOC_SIZE (htab);
11464 /* VxWorks executables have a second set of relocations for
11465 each PLT entry. They go in a separate relocation section,
11466 which is processed by the kernel loader. */
11467 if (htab->vxworks_p && !info->shared)
11469 /* There is a relocation for the initial PLT entry:
11470 an R_ARM_ABS32 relocation for _GLOBAL_OFFSET_TABLE_. */
11471 if (h->plt.offset == htab->plt_header_size)
11472 htab->srelplt2->size += RELOC_SIZE (htab);
11474 /* There are two extra relocations for each subsequent
11475 PLT entry: an R_ARM_ABS32 relocation for the GOT entry,
11476 and an R_ARM_ABS32 relocation for the PLT entry. */
11477 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
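/* (Sketch of the resulting layout: for N PLT entries, .rela.plt.unloaded
   ends up holding 2 * N + 1 relocations -- one for the
   _GLOBAL_OFFSET_TABLE_ reference in the PLT header, added above only
   when the first entry is seen, plus a GOT/PLT pair per entry.
   elf32_arm_finish_dynamic_symbol and elf32_arm_finish_dynamic_sections
   fill them in and later correct their symbol indexes.)  */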
11482 h->plt.offset = (bfd_vma) -1;
11488 h->plt.offset = (bfd_vma) -1;
11492 if (h->got.refcount > 0)
11496 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11499 /* Make sure this symbol is output as a dynamic symbol.
11500 Undefined weak syms won't yet be marked as dynamic. */
11501 if (h->dynindx == -1
11502 && !h->forced_local)
11504 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11508 if (!htab->symbian_p)
11510 s = htab->root.sgot;
11511 h->got.offset = s->size;
11513 if (tls_type == GOT_UNKNOWN)
11516 if (tls_type == GOT_NORMAL)
11517 /* Non-TLS symbols need one GOT slot. */
11521 if (tls_type & GOT_TLS_GD)
11522 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11524 if (tls_type & GOT_TLS_IE)
11525 /* R_ARM_TLS_IE32 needs one GOT slot. */
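/* (For reference, assuming the usual ARM TLS model: the GD pair is filled
   in with the module index and the symbol's offset within that module's
   TLS block, while an IE slot holds an offset from the thread pointer.
   The dynamic relocations sized below cover whichever of those values the
   dynamic linker has to compute at load time.)  */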
11529 dyn = htab->root.dynamic_sections_created;
11532 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11534 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11537 if (tls_type != GOT_NORMAL
11538 && (info->shared || indx != 0)
11539 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11540 || h->root.type != bfd_link_hash_undefweak))
11542 if (tls_type & GOT_TLS_IE)
11543 htab->root.srelgot->size += RELOC_SIZE (htab);
11545 if (tls_type & GOT_TLS_GD)
11546 htab->root.srelgot->size += RELOC_SIZE (htab);
11548 if ((tls_type & GOT_TLS_GD) && indx != 0)
11549 htab->root.srelgot->size += RELOC_SIZE (htab);
11551 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11552 || h->root.type != bfd_link_hash_undefweak)
11554 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11555 htab->root.srelgot->size += RELOC_SIZE (htab);
11559 h->got.offset = (bfd_vma) -1;
11561 /* Allocate stubs for exported Thumb functions on v4t. */
11562 if (!htab->use_blx && h->dynindx != -1
11564 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11565 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11567 struct elf_link_hash_entry * th;
11568 struct bfd_link_hash_entry * bh;
11569 struct elf_link_hash_entry * myh;
11573 /* Create a new symbol to register the real location of the function. */
11574 s = h->root.u.def.section;
11575 sprintf (name, "__real_%s", h->root.root.string);
11576 _bfd_generic_link_add_one_symbol (info, s->owner,
11577 name, BSF_GLOBAL, s,
11578 h->root.u.def.value,
11579 NULL, TRUE, FALSE, &bh);
11581 myh = (struct elf_link_hash_entry *) bh;
11582 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11583 myh->forced_local = 1;
11584 eh->export_glue = myh;
11585 th = record_arm_to_thumb_glue (info, h);
11586 /* Point the symbol at the stub. */
11587 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11588 h->root.u.def.section = th->root.u.def.section;
11589 h->root.u.def.value = th->root.u.def.value & ~1;
11592 if (eh->dyn_relocs == NULL)
11595 /* In the shared -Bsymbolic case, discard space allocated for
11596 dynamic pc-relative relocs against symbols which turn out to be
11597 defined in regular objects. For the normal shared case, discard
11598 space for pc-relative relocs that have become local due to symbol
11599 visibility changes. */
11601 if (info->shared || htab->root.is_relocatable_executable)
11603 /* The only relocs that use pc_count are R_ARM_REL32 and
11604 R_ARM_REL32_NOI, which will appear on something like
11605 ".long foo - .". We want calls to protected symbols to resolve
11606 directly to the function rather than going via the plt. If people
11607 want function pointer comparisons to work as expected then they
11608 should avoid writing assembly like ".long foo - .". */
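/* (The reasoning: a pc-relative reloc whose target resolves within the
   same module has a value that is fixed at link time and independent of
   the load address, so no run-time relocation is needed for it -- hence
   pc_count is subtracted from count below.)  */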
11609 if (SYMBOL_CALLS_LOCAL (info, h))
11611 struct elf_dyn_relocs **pp;
11613 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
11615 p->count -= p->pc_count;
11624 if (htab->vxworks_p)
11626 struct elf_dyn_relocs **pp;
11628 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
11630 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
11637 /* Also discard relocs on undefined weak syms with non-default
11639 if (eh->dyn_relocs != NULL
11640 && h->root.type == bfd_link_hash_undefweak)
11642 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11643 eh->dyn_relocs = NULL;
11645 /* Make sure undefined weak symbols are output as a dynamic
11647 else if (h->dynindx == -1
11648 && !h->forced_local)
11650 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11655 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11656 && h->root.type == bfd_link_hash_new)
11658 /* Output absolute symbols so that we can create relocations
11659 against them. For normal symbols we output a relocation
11660 against the section that contains them. */
11661 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11668 /* For the non-shared case, discard space for relocs against
11669 symbols which turn out to need copy relocs or are not
11672 if (!h->non_got_ref
11673 && ((h->def_dynamic
11674 && !h->def_regular)
11675 || (htab->root.dynamic_sections_created
11676 && (h->root.type == bfd_link_hash_undefweak
11677 || h->root.type == bfd_link_hash_undefined))))
11679 /* Make sure this symbol is output as a dynamic symbol.
11680 Undefined weak syms won't yet be marked as dynamic. */
11681 if (h->dynindx == -1
11682 && !h->forced_local)
11684 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11688 /* If that succeeded, we know we'll be keeping all the
11690 if (h->dynindx != -1)
11694 eh->dyn_relocs = NULL;
11699 /* Finally, allocate space. */
11700 for (p = eh->dyn_relocs; p != NULL; p = p->next)
11702 asection *sreloc = elf_section_data (p->sec)->sreloc;
11703 sreloc->size += p->count * RELOC_SIZE (htab);
11709 /* Find any dynamic relocs that apply to read-only sections. */
11712 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11714 struct elf32_arm_link_hash_entry * eh;
11715 struct elf_dyn_relocs * p;
11717 if (h->root.type == bfd_link_hash_warning)
11718 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11720 eh = (struct elf32_arm_link_hash_entry *) h;
11721 for (p = eh->dyn_relocs; p != NULL; p = p->next)
11723 asection *s = p->sec;
11725 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11727 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11729 info->flags |= DF_TEXTREL;
11731 /* Not an error, just cut short the traversal. */
11739 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11742 struct elf32_arm_link_hash_table *globals;
11744 globals = elf32_arm_hash_table (info);
11745 if (globals == NULL)
11748 globals->byteswap_code = byteswap_code;
11751 /* Set the sizes of the dynamic sections. */
11754 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11755 struct bfd_link_info * info)
11760 bfd_boolean relocs;
11762 struct elf32_arm_link_hash_table *htab;
11764 htab = elf32_arm_hash_table (info);
11768 dynobj = elf_hash_table (info)->dynobj;
11769 BFD_ASSERT (dynobj != NULL);
11770 check_use_blx (htab);
11772 if (elf_hash_table (info)->dynamic_sections_created)
11774 /* Set the contents of the .interp section to the interpreter. */
11775 if (info->executable)
11777 s = bfd_get_section_by_name (dynobj, ".interp");
11778 BFD_ASSERT (s != NULL);
11779 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11780 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11784 /* Set up .got offsets for local syms, and space for local dynamic
11786 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11788 bfd_signed_vma *local_got;
11789 bfd_signed_vma *end_local_got;
11790 char *local_tls_type;
11791 bfd_size_type locsymcount;
11792 Elf_Internal_Shdr *symtab_hdr;
11794 bfd_boolean is_vxworks = htab->vxworks_p;
11796 if (! is_arm_elf (ibfd))
11799 for (s = ibfd->sections; s != NULL; s = s->next)
11801 struct elf_dyn_relocs *p;
11803 for (p = (struct elf_dyn_relocs *)
11804 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11806 if (!bfd_is_abs_section (p->sec)
11807 && bfd_is_abs_section (p->sec->output_section))
11809 /* Input section has been discarded, either because
11810 it is a copy of a linkonce section or due to
11811 linker script /DISCARD/, so we'll be discarding
11814 else if (is_vxworks
11815 && strcmp (p->sec->output_section->name,
11818 /* Relocations in vxworks .tls_vars sections are
11819 handled specially by the loader. */
11821 else if (p->count != 0)
11823 srel = elf_section_data (p->sec)->sreloc;
11824 srel->size += p->count * RELOC_SIZE (htab);
11825 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
11826 info->flags |= DF_TEXTREL;
11831 local_got = elf_local_got_refcounts (ibfd);
11835 symtab_hdr = & elf_symtab_hdr (ibfd);
11836 locsymcount = symtab_hdr->sh_info;
11837 end_local_got = local_got + locsymcount;
11838 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11839 s = htab->root.sgot;
11840 srel = htab->root.srelgot;
11841 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11843 if (*local_got > 0)
11845 *local_got = s->size;
11846 if (*local_tls_type & GOT_TLS_GD)
11847 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11849 if (*local_tls_type & GOT_TLS_IE)
11851 if (*local_tls_type == GOT_NORMAL)
11854 if (info->shared || *local_tls_type == GOT_TLS_GD)
11855 srel->size += RELOC_SIZE (htab);
11858 *local_got = (bfd_vma) -1;
11862 if (htab->tls_ldm_got.refcount > 0)
11864 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11865 for R_ARM_TLS_LDM32 relocations. */
11866 htab->tls_ldm_got.offset = htab->root.sgot->size;
11867 htab->root.sgot->size += 8;
11869 htab->root.srelgot->size += RELOC_SIZE (htab);
11872 htab->tls_ldm_got.offset = -1;
11874 /* Allocate global sym .plt and .got entries, and space for global
11875 sym dynamic relocs. */
11876 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11878 /* Here we rummage through the found bfds to collect glue information. */
11879 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11881 if (! is_arm_elf (ibfd))
11884 /* Initialise mapping tables for code/data. */
11885 bfd_elf32_arm_init_maps (ibfd);
11887 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11888 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11889 /* xgettext:c-format */
11890 _bfd_error_handler (_("Errors encountered processing file %s"),
11894 /* Allocate space for the glue sections now that we've sized them. */
11895 bfd_elf32_arm_allocate_interworking_sections (info);
11897 /* The check_relocs and adjust_dynamic_symbol entry points have
11898 determined the sizes of the various dynamic sections. Allocate
11899 memory for them. */
11902 for (s = dynobj->sections; s != NULL; s = s->next)
11906 if ((s->flags & SEC_LINKER_CREATED) == 0)
11909 /* It's OK to base decisions on the section name, because none
11910 of the dynobj section names depend upon the input files. */
11911 name = bfd_get_section_name (dynobj, s);
11913 if (strcmp (name, ".plt") == 0)
11915 /* Remember whether there is a PLT. */
11916 plt = s->size != 0;
11918 else if (CONST_STRNEQ (name, ".rel"))
11922 /* Remember whether there are any reloc sections other
11923 than .rel(a).plt and .rela.plt.unloaded. */
11924 if (s != htab->root.srelplt && s != htab->srelplt2)
11927 /* We use the reloc_count field as a counter if we need
11928 to copy relocs into the output file. */
11929 s->reloc_count = 0;
11932 else if (! CONST_STRNEQ (name, ".got")
11933 && strcmp (name, ".dynbss") != 0)
11935 /* It's not one of our sections, so don't allocate space. */
11941 /* If we don't need this section, strip it from the
11942 output file. This is mostly to handle .rel(a).bss and
11943 .rel(a).plt. We must create both sections in
11944 create_dynamic_sections, because they must be created
11945 before the linker maps input sections to output
11946 sections. The linker does that before
11947 adjust_dynamic_symbol is called, and it is that
11948 function which decides whether anything needs to go
11949 into these sections. */
11950 s->flags |= SEC_EXCLUDE;
11954 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11957 /* Allocate memory for the section contents. */
11958 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
11959 if (s->contents == NULL)
11963 if (elf_hash_table (info)->dynamic_sections_created)
11965 /* Add some entries to the .dynamic section. We fill in the
11966 values later, in elf32_arm_finish_dynamic_sections, but we
11967 must add the entries now so that we get the correct size for
11968 the .dynamic section. The DT_DEBUG entry is filled in by the
11969 dynamic linker and used by the debugger. */
11970 #define add_dynamic_entry(TAG, VAL) \
11971 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11973 if (info->executable)
11975 if (!add_dynamic_entry (DT_DEBUG, 0))
11981 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11982 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11983 || !add_dynamic_entry (DT_PLTREL,
11984 htab->use_rel ? DT_REL : DT_RELA)
11985 || !add_dynamic_entry (DT_JMPREL, 0))
11993 if (!add_dynamic_entry (DT_REL, 0)
11994 || !add_dynamic_entry (DT_RELSZ, 0)
11995 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
12000 if (!add_dynamic_entry (DT_RELA, 0)
12001 || !add_dynamic_entry (DT_RELASZ, 0)
12002 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
12007 /* If any dynamic relocs apply to a read-only section,
12008 then we need a DT_TEXTREL entry. */
12009 if ((info->flags & DF_TEXTREL) == 0)
12010 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
12013 if ((info->flags & DF_TEXTREL) != 0)
12015 if (!add_dynamic_entry (DT_TEXTREL, 0))
12018 if (htab->vxworks_p
12019 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
12022 #undef add_dynamic_entry
12027 /* Finish up dynamic symbol handling. We set the contents of various
12028 dynamic sections here. */
12031 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
12032 struct bfd_link_info * info,
12033 struct elf_link_hash_entry * h,
12034 Elf_Internal_Sym * sym)
12036 struct elf32_arm_link_hash_table *htab;
12037 struct elf32_arm_link_hash_entry *eh;
12039 htab = elf32_arm_hash_table (info);
12043 eh = (struct elf32_arm_link_hash_entry *) h;
12045 if (h->plt.offset != (bfd_vma) -1)
12051 Elf_Internal_Rela rel;
12053 /* This symbol has an entry in the procedure linkage table. Set
12056 BFD_ASSERT (h->dynindx != -1);
12058 splt = htab->root.splt;
12059 srel = htab->root.srelplt;
12060 BFD_ASSERT (splt != NULL && srel != NULL);
12062 /* Fill in the entry in the procedure linkage table. */
12063 if (htab->symbian_p)
12065 put_arm_insn (htab, output_bfd,
12066 elf32_arm_symbian_plt_entry[0],
12067 splt->contents + h->plt.offset);
12068 bfd_put_32 (output_bfd,
12069 elf32_arm_symbian_plt_entry[1],
12070 splt->contents + h->plt.offset + 4);
12072 /* Fill in the entry in the .rel.plt section. */
12073 rel.r_offset = (splt->output_section->vma
12074 + splt->output_offset
12075 + h->plt.offset + 4);
12076 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12078 /* Get the index in the procedure linkage table which
12079 corresponds to this symbol. This is the index of this symbol
12080 in all the symbols for which we are making plt entries. The
12081 first entry in the procedure linkage table is reserved. */
12082 plt_index = ((h->plt.offset - htab->plt_header_size)
12083 / htab->plt_entry_size);
12087 bfd_vma got_offset, got_address, plt_address;
12088 bfd_vma got_displacement;
12092 sgot = htab->root.sgotplt;
12093 BFD_ASSERT (sgot != NULL);
12095 /* Get the offset into the .got.plt table of the entry that
12096 corresponds to this function. */
12097 got_offset = eh->plt_got_offset;
12099 /* Get the index in the procedure linkage table which
12100 corresponds to this symbol. This is the index of this symbol
12101 in all the symbols for which we are making plt entries. The
12102 first three entries in .got.plt are reserved; after that
12103 symbols appear in the same order as in .plt. */
12104 plt_index = (got_offset - 12) / 4;
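/* (For example, with the first three .got.plt words reserved -- the first
   is set to the address of _DYNAMIC in finish_dynamic_sections and the
   next two are left for the dynamic linker -- the first PLT'd symbol has
   got_offset 12 and therefore plt_index 0, the next has offset 16 and
   index 1, and so on.)  */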
12106 /* Calculate the address of the GOT entry. */
12107 got_address = (sgot->output_section->vma
12108 + sgot->output_offset
12111 /* ...and the address of the PLT entry. */
12112 plt_address = (splt->output_section->vma
12113 + splt->output_offset
12116 ptr = splt->contents + h->plt.offset;
12117 if (htab->vxworks_p && info->shared)
12122 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12124 val = elf32_arm_vxworks_shared_plt_entry[i];
12126 val |= got_address - sgot->output_section->vma;
12128 val |= plt_index * RELOC_SIZE (htab);
12129 if (i == 2 || i == 5)
12130 bfd_put_32 (output_bfd, val, ptr);
12132 put_arm_insn (htab, output_bfd, val, ptr);
12135 else if (htab->vxworks_p)
12140 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12142 val = elf32_arm_vxworks_exec_plt_entry[i];
12144 val |= got_address;
12146 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12148 val |= plt_index * RELOC_SIZE (htab);
12149 if (i == 2 || i == 5)
12150 bfd_put_32 (output_bfd, val, ptr);
12152 put_arm_insn (htab, output_bfd, val, ptr);
12155 loc = (htab->srelplt2->contents
12156 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12158 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12159 referencing the GOT for this PLT entry. */
12160 rel.r_offset = plt_address + 8;
12161 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12162 rel.r_addend = got_offset;
12163 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12164 loc += RELOC_SIZE (htab);
12166 /* Create the R_ARM_ABS32 relocation referencing the
12167 beginning of the PLT for this GOT entry. */
12168 rel.r_offset = got_address;
12169 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12171 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12175 bfd_signed_vma thumb_refs;
12176 /* Calculate the displacement between the PLT slot and the
12177 entry in the GOT. The eight-byte offset accounts for the
12178 value produced by adding to pc in the first instruction
12179 of the PLT stub. */
12180 got_displacement = got_address - (plt_address + 8);
12182 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
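/* (The displacement is then split across the three-instruction PLT entry
   below: bits [27:20] go into the immediate of the first instruction,
   bits [19:12] into the second and bits [11:0] into the third, which is
   why the assertion above requires the top four bits to be clear.)  */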
12184 thumb_refs = eh->plt_thumb_refcount;
12185 if (!htab->use_blx)
12186 thumb_refs += eh->plt_maybe_thumb_refcount;
12188 if (thumb_refs > 0)
12190 put_thumb_insn (htab, output_bfd,
12191 elf32_arm_plt_thumb_stub[0], ptr - 4);
12192 put_thumb_insn (htab, output_bfd,
12193 elf32_arm_plt_thumb_stub[1], ptr - 2);
12196 put_arm_insn (htab, output_bfd,
12197 elf32_arm_plt_entry[0]
12198 | ((got_displacement & 0x0ff00000) >> 20),
12200 put_arm_insn (htab, output_bfd,
12201 elf32_arm_plt_entry[1]
12202 | ((got_displacement & 0x000ff000) >> 12),
12204 put_arm_insn (htab, output_bfd,
12205 elf32_arm_plt_entry[2]
12206 | (got_displacement & 0x00000fff),
12208 #ifdef FOUR_WORD_PLT
12209 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12213 /* Fill in the entry in the global offset table. */
12214 bfd_put_32 (output_bfd,
12215 (splt->output_section->vma
12216 + splt->output_offset),
12217 sgot->contents + got_offset);
12219 /* Fill in the entry in the .rel(a).plt section. */
12221 rel.r_offset = got_address;
12222 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12225 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12226 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12228 if (!h->def_regular)
12230 /* Mark the symbol as undefined, rather than as defined in
12231 the .plt section. Leave the value alone. */
12232 sym->st_shndx = SHN_UNDEF;
12233 /* If the symbol is weak, we do need to clear the value.
12234 Otherwise, the PLT entry would provide a definition for
12235 the symbol even if the symbol wasn't defined anywhere,
12236 and so the symbol would never be NULL. */
12237 if (!h->ref_regular_nonweak)
12242 if (h->got.offset != (bfd_vma) -1
12243 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12244 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12248 Elf_Internal_Rela rel;
12252 /* This symbol has an entry in the global offset table. Set it
12254 sgot = htab->root.sgot;
12255 srel = htab->root.srelgot;
12256 BFD_ASSERT (sgot != NULL && srel != NULL);
12258 offset = (h->got.offset & ~(bfd_vma) 1);
12260 rel.r_offset = (sgot->output_section->vma
12261 + sgot->output_offset
12264 /* If this is a static link, or it is a -Bsymbolic link and the
12265 symbol is defined locally or was forced to be local because
12266 of a version file, we just want to emit a RELATIVE reloc.
12267 The entry in the global offset table will already have been
12268 initialized in the relocate_section function. */
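/* (R_ARM_RELATIVE simply asks the dynamic linker to add the module's load
   base to an addend: in the REL case that addend is the value already
   sitting in the GOT slot, in the RELA case we move it into r_addend and
   clear the slot, as done just below.)  */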
12270 && SYMBOL_REFERENCES_LOCAL (info, h))
12272 BFD_ASSERT ((h->got.offset & 1) != 0);
12273 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12274 if (!htab->use_rel)
12276 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12277 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12282 BFD_ASSERT ((h->got.offset & 1) == 0);
12283 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12284 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12287 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12288 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12294 Elf_Internal_Rela rel;
12297 /* This symbol needs a copy reloc. Set it up. */
12298 BFD_ASSERT (h->dynindx != -1
12299 && (h->root.type == bfd_link_hash_defined
12300 || h->root.type == bfd_link_hash_defweak));
12303 BFD_ASSERT (s != NULL);
12306 rel.r_offset = (h->root.u.def.value
12307 + h->root.u.def.section->output_section->vma
12308 + h->root.u.def.section->output_offset);
12309 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12310 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12311 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12314 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12315 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12316 to the ".got" section. */
12317 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12318 || (!htab->vxworks_p && h == htab->root.hgot))
12319 sym->st_shndx = SHN_ABS;
12324 /* Finish up the dynamic sections. */
12327 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12332 struct elf32_arm_link_hash_table *htab;
12334 htab = elf32_arm_hash_table (info);
12338 dynobj = elf_hash_table (info)->dynobj;
12340 sgot = htab->root.sgotplt;
12341 BFD_ASSERT (htab->symbian_p || sgot != NULL);
12342 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12344 if (elf_hash_table (info)->dynamic_sections_created)
12347 Elf32_External_Dyn *dyncon, *dynconend;
12349 splt = htab->root.splt;
12350 BFD_ASSERT (splt != NULL && sdyn != NULL);
12352 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12353 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12355 for (; dyncon < dynconend; dyncon++)
12357 Elf_Internal_Dyn dyn;
12361 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12368 if (htab->vxworks_p
12369 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12370 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12375 goto get_vma_if_bpabi;
12378 goto get_vma_if_bpabi;
12381 goto get_vma_if_bpabi;
12383 name = ".gnu.version";
12384 goto get_vma_if_bpabi;
12386 name = ".gnu.version_d";
12387 goto get_vma_if_bpabi;
12389 name = ".gnu.version_r";
12390 goto get_vma_if_bpabi;
12396 name = RELOC_SECTION (htab, ".plt");
12398 s = bfd_get_section_by_name (output_bfd, name);
12399 BFD_ASSERT (s != NULL);
12400 if (!htab->symbian_p)
12401 dyn.d_un.d_ptr = s->vma;
12403 /* In the BPABI, tags in the PT_DYNAMIC section point
12404 at the file offset, not the memory address, for the
12405 convenience of the post linker. */
12406 dyn.d_un.d_ptr = s->filepos;
12407 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12411 if (htab->symbian_p)
12416 s = htab->root.srelplt;
12417 BFD_ASSERT (s != NULL);
12418 dyn.d_un.d_val = s->size;
12419 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12424 if (!htab->symbian_p)
12426 /* My reading of the SVR4 ABI indicates that the
12427 procedure linkage table relocs (DT_JMPREL) should be
12428 included in the overall relocs (DT_REL). This is
12429 what Solaris does. However, UnixWare cannot handle
12430 that case. Therefore, we override the DT_RELSZ entry
12431 here to make it not include the JMPREL relocs. Since
12432 the linker script arranges for .rel(a).plt to follow all
12433 other relocation sections, we don't have to worry
12434 about changing the DT_REL entry. */
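/* (Illustrative figures only: if .rel.dyn occupies 160 bytes and .rel.plt
   a further 40, the generic code will have set DT_RELSZ to 200;
   subtracting the .rel(a).plt size leaves 160, so the JMPREL range is
   described only by DT_JMPREL/DT_PLTRELSZ.)  */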
12435 s = htab->root.srelplt;
12437 dyn.d_un.d_val -= s->size;
12438 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12441 /* Fall through. */
12445 /* In the BPABI, the DT_REL tag must point at the file
12446 offset, not the VMA, of the first relocation
12447 section. So, we use code similar to that in
12448 elflink.c, but do not check for SHF_ALLOC on the
12449 relocation section, since relocation sections are
12450 never allocated under the BPABI. The comments above
12451 about UnixWare notwithstanding, we include all of the
12452 relocations here. */
12453 if (htab->symbian_p)
12456 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12457 ? SHT_REL : SHT_RELA);
12458 dyn.d_un.d_val = 0;
12459 for (i = 1; i < elf_numsections (output_bfd); i++)
12461 Elf_Internal_Shdr *hdr
12462 = elf_elfsections (output_bfd)[i];
12463 if (hdr->sh_type == type)
12465 if (dyn.d_tag == DT_RELSZ
12466 || dyn.d_tag == DT_RELASZ)
12467 dyn.d_un.d_val += hdr->sh_size;
12468 else if ((ufile_ptr) hdr->sh_offset
12469 <= dyn.d_un.d_val - 1)
12470 dyn.d_un.d_val = hdr->sh_offset;
12473 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12477 /* Set the bottom bit of DT_INIT/FINI if the
12478 corresponding function is Thumb. */
12480 name = info->init_function;
12483 name = info->fini_function;
12485 /* If it wasn't set by elf_bfd_final_link
12486 then there is nothing to adjust. */
12487 if (dyn.d_un.d_val != 0)
12489 struct elf_link_hash_entry * eh;
12491 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12492 FALSE, FALSE, TRUE);
12494 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12496 dyn.d_un.d_val |= 1;
12497 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12504 /* Fill in the first entry in the procedure linkage table. */
12505 if (splt->size > 0 && htab->plt_header_size)
12507 const bfd_vma *plt0_entry;
12508 bfd_vma got_address, plt_address, got_displacement;
12510 /* Calculate the addresses of the GOT and PLT. */
12511 got_address = sgot->output_section->vma + sgot->output_offset;
12512 plt_address = splt->output_section->vma + splt->output_offset;
12514 if (htab->vxworks_p)
12516 /* The VxWorks GOT is relocated by the dynamic linker.
12517 Therefore, we must emit relocations rather than simply
12518 computing the values now. */
12519 Elf_Internal_Rela rel;
12521 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12522 put_arm_insn (htab, output_bfd, plt0_entry[0],
12523 splt->contents + 0);
12524 put_arm_insn (htab, output_bfd, plt0_entry[1],
12525 splt->contents + 4);
12526 put_arm_insn (htab, output_bfd, plt0_entry[2],
12527 splt->contents + 8);
12528 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12530 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12531 rel.r_offset = plt_address + 12;
12532 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12534 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12535 htab->srelplt2->contents);
12539 got_displacement = got_address - (plt_address + 16);
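/* (The 16-byte bias arises because the displacement word emitted below is
   consumed by an instruction 8 bytes into the header, and an ARM-state pc
   read yields that instruction's address plus 8 -- the same pipeline
   offset accounted for in the per-symbol entries above.)  */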
12541 plt0_entry = elf32_arm_plt0_entry;
12542 put_arm_insn (htab, output_bfd, plt0_entry[0],
12543 splt->contents + 0);
12544 put_arm_insn (htab, output_bfd, plt0_entry[1],
12545 splt->contents + 4);
12546 put_arm_insn (htab, output_bfd, plt0_entry[2],
12547 splt->contents + 8);
12548 put_arm_insn (htab, output_bfd, plt0_entry[3],
12549 splt->contents + 12);
12551 #ifdef FOUR_WORD_PLT
12552 /* The displacement value goes in the otherwise-unused
12553 last word of the second entry. */
12554 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12556 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12561 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12562 really seem like the right value. */
12563 if (splt->output_section->owner == output_bfd)
12564 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12566 if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0)
12568 /* Correct the .rel(a).plt.unloaded relocations. They will have
12569 incorrect symbol indexes. */
12573 num_plts = ((htab->root.splt->size - htab->plt_header_size)
12574 / htab->plt_entry_size);
12575 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12577 for (; num_plts; num_plts--)
12579 Elf_Internal_Rela rel;
12581 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12582 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12583 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12584 p += RELOC_SIZE (htab);
12586 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12587 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12588 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12589 p += RELOC_SIZE (htab);
12594 /* Fill in the first three entries in the global offset table. */
12597 if (sgot->size > 0)
12600 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12602 bfd_put_32 (output_bfd,
12603 sdyn->output_section->vma + sdyn->output_offset,
12605 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12606 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12609 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12616 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12618 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12619 struct elf32_arm_link_hash_table *globals;
12621 i_ehdrp = elf_elfheader (abfd);
12623 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12624 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12626 i_ehdrp->e_ident[EI_OSABI] = 0;
12627 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12631 globals = elf32_arm_hash_table (link_info);
12632 if (globals != NULL && globals->byteswap_code)
12633 i_ehdrp->e_flags |= EF_ARM_BE8;
12637 static enum elf_reloc_type_class
12638 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12640 switch ((int) ELF32_R_TYPE (rela->r_info))
12642 case R_ARM_RELATIVE:
12643 return reloc_class_relative;
12644 case R_ARM_JUMP_SLOT:
12645 return reloc_class_plt;
12647 return reloc_class_copy;
12649 return reloc_class_normal;
12653 /* Set the right machine number for an Arm ELF file. */
12656 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12658 if (hdr->sh_type == SHT_NOTE)
12659 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12665 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12667 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12670 /* Return TRUE if this is an unwinding table entry. */
12673 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12675 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12676 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12680 /* Set the type and flags for an ARM section. We do this by
12681 the section name, which is a hack, but ought to work. */
12684 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12688 name = bfd_get_section_name (abfd, sec);
12690 if (is_arm_elf_unwind_section_name (abfd, name))
12692 hdr->sh_type = SHT_ARM_EXIDX;
12693 hdr->sh_flags |= SHF_LINK_ORDER;
12698 /* Handle an ARM specific section when reading an object file. This is
12699 called when bfd_section_from_shdr finds a section with an unknown
12703 elf32_arm_section_from_shdr (bfd *abfd,
12704 Elf_Internal_Shdr * hdr,
12708 /* There ought to be a place to keep ELF backend specific flags, but
12709 at the moment there isn't one. We just keep track of the
12710 sections by their name, instead. Fortunately, the ABI gives
12711 names for all the ARM specific sections, so we will probably get
12713 switch (hdr->sh_type)
12715 case SHT_ARM_EXIDX:
12716 case SHT_ARM_PREEMPTMAP:
12717 case SHT_ARM_ATTRIBUTES:
12724 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12730 static _arm_elf_section_data *
12731 get_arm_elf_section_data (asection * sec)
12733 if (sec && sec->owner && is_arm_elf (sec->owner))
12734 return elf32_arm_section_data (sec);
12742 struct bfd_link_info *info;
12745 int (*func) (void *, const char *, Elf_Internal_Sym *,
12746 asection *, struct elf_link_hash_entry *);
12747 } output_arch_syminfo;
12749 enum map_symbol_type
12757 /* Output a single mapping symbol. */
12760 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12761 enum map_symbol_type type,
12764 static const char *names[3] = {"$a", "$t", "$d"};
12765 Elf_Internal_Sym sym;
12767 sym.st_value = osi->sec->output_section->vma
12768 + osi->sec->output_offset
12772 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12773 sym.st_shndx = osi->sec_shndx;
12774 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
12775 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12779 /* Output mapping symbols for PLT entries associated with H. */
12782 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12784 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12785 struct elf32_arm_link_hash_table *htab;
12786 struct elf32_arm_link_hash_entry *eh;
12789 if (h->root.type == bfd_link_hash_indirect)
12792 if (h->root.type == bfd_link_hash_warning)
12793 /* When warning symbols are created, they **replace** the "real"
12794 entry in the hash table, thus we never get to see the real
12795 symbol in a hash traversal. So look at it now. */
12796 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12798 if (h->plt.offset == (bfd_vma) -1)
12801 htab = elf32_arm_hash_table (osi->info);
12805 eh = (struct elf32_arm_link_hash_entry *) h;
12806 addr = h->plt.offset;
12807 if (htab->symbian_p)
12809 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12811 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12814 else if (htab->vxworks_p)
12816 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12818 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12820 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12822 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12827 bfd_signed_vma thumb_refs;
12829 thumb_refs = eh->plt_thumb_refcount;
12830 if (!htab->use_blx)
12831 thumb_refs += eh->plt_maybe_thumb_refcount;
12833 if (thumb_refs > 0)
12835 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12838 #ifdef FOUR_WORD_PLT
12839 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12841 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12844 /* A three-word PLT with no Thumb thunk contains only ARM code,
12845 so we only need to output a mapping symbol for the first PLT entry
12846 and for entries with Thumb thunks. */
12847 if (thumb_refs > 0 || addr == 20)
12849 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12858 /* Output a single local symbol for a generated stub. */
12861 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12862 bfd_vma offset, bfd_vma size)
12864 Elf_Internal_Sym sym;
12866 sym.st_value = osi->sec->output_section->vma
12867 + osi->sec->output_offset
12869 sym.st_size = size;
12871 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12872 sym.st_shndx = osi->sec_shndx;
12873 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12877 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12880 struct elf32_arm_stub_hash_entry *stub_entry;
12881 asection *stub_sec;
12884 output_arch_syminfo *osi;
12885 const insn_sequence *template_sequence;
12886 enum stub_insn_type prev_type;
12889 enum map_symbol_type sym_type;
12891 /* Massage our args to the form they really have. */
12892 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12893 osi = (output_arch_syminfo *) in_arg;
12895 stub_sec = stub_entry->stub_sec;
12897 /* Ensure this stub is attached to the current section being
12899 if (stub_sec != osi->sec)
12902 addr = (bfd_vma) stub_entry->stub_offset;
12903 stub_name = stub_entry->output_name;
12905 template_sequence = stub_entry->stub_template;
12906 switch (template_sequence[0].type)
12909 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12914 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12915 stub_entry->stub_size))
12923 prev_type = DATA_TYPE;
12925 for (i = 0; i < stub_entry->stub_template_size; i++)
12927 switch (template_sequence[i].type)
12930 sym_type = ARM_MAP_ARM;
12935 sym_type = ARM_MAP_THUMB;
12939 sym_type = ARM_MAP_DATA;
12947 if (template_sequence[i].type != prev_type)
12949 prev_type = template_sequence[i].type;
12950 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12954 switch (template_sequence[i].type)
12978 /* Output mapping symbols for linker generated sections,
12979 and for those data-only sections that do not have a
12983 elf32_arm_output_arch_local_syms (bfd *output_bfd,
12984 struct bfd_link_info *info,
12986 int (*func) (void *, const char *,
12987 Elf_Internal_Sym *,
12989 struct elf_link_hash_entry *))
12991 output_arch_syminfo osi;
12992 struct elf32_arm_link_hash_table *htab;
12994 bfd_size_type size;
12997 htab = elf32_arm_hash_table (info);
13001 check_use_blx (htab);
13007 /* Add a $d mapping symbol to data-only sections that
13008 don't have any mapping symbol. This may result in (harmless) redundant
13009 mapping symbols. */
13010 for (input_bfd = info->input_bfds;
13012 input_bfd = input_bfd->link_next)
13014 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
13015 for (osi.sec = input_bfd->sections;
13017 osi.sec = osi.sec->next)
13019 if (osi.sec->output_section != NULL
13020 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
13022 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
13023 == SEC_HAS_CONTENTS
13024 && get_arm_elf_section_data (osi.sec) != NULL
13025 && get_arm_elf_section_data (osi.sec)->mapcount == 0
13026 && osi.sec->size > 0)
13028 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13029 (output_bfd, osi.sec->output_section);
13030 if (osi.sec_shndx != (int)SHN_BAD)
13031 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
13036 /* ARM->Thumb glue. */
13037 if (htab->arm_glue_size > 0)
13039 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13040 ARM2THUMB_GLUE_SECTION_NAME);
13042 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13043 (output_bfd, osi.sec->output_section);
13044 if (info->shared || htab->root.is_relocatable_executable
13045 || htab->pic_veneer)
13046 size = ARM2THUMB_PIC_GLUE_SIZE;
13047 else if (htab->use_blx)
13048 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13050 size = ARM2THUMB_STATIC_GLUE_SIZE;
13052 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13054 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13055 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13059 /* Thumb->ARM glue. */
13060 if (htab->thumb_glue_size > 0)
13062 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13063 THUMB2ARM_GLUE_SECTION_NAME);
13065 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13066 (output_bfd, osi.sec->output_section);
13067 size = THUMB2ARM_GLUE_SIZE;
13069 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13071 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13072 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13076 /* ARMv4 BX veneers. */
13077 if (htab->bx_glue_size > 0)
13079 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13080 ARM_BX_GLUE_SECTION_NAME);
13082 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13083 (output_bfd, osi.sec->output_section);
13085 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13089 /* Long call stubs. */
13089 if (htab->stub_bfd && htab->stub_bfd->sections)
13091 asection* stub_sec;
13093 for (stub_sec = htab->stub_bfd->sections;
13095 stub_sec = stub_sec->next)
13097 /* Ignore non-stub sections. */
13098 if (!strstr (stub_sec->name, STUB_SUFFIX))
13101 osi.sec = stub_sec;
13103 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13104 (output_bfd, osi.sec->output_section);
13106 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13110 /* Finally, output mapping symbols for the PLT. */
13111 if (!htab->root.splt || htab->root.splt->size == 0)
13114 osi.sec = htab->root.splt;
13115 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13116 osi.sec->output_section);
13117 /* Output mapping symbols for the plt header. SymbianOS does not have a
13119 if (htab->vxworks_p)
13121 /* VxWorks shared libraries have no PLT header. */
13124 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13126 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13130 else if (!htab->symbian_p)
13132 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13134 #ifndef FOUR_WORD_PLT
13135 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13140 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13144 /* Allocate target specific section data. */
13147 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13149 if (!sec->used_by_bfd)
13151 _arm_elf_section_data *sdata;
13152 bfd_size_type amt = sizeof (*sdata);
13154 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13157 sec->used_by_bfd = sdata;
13160 return _bfd_elf_new_section_hook (abfd, sec);
13164 /* Used to order a list of mapping symbols by address. */
13167 elf32_arm_compare_mapping (const void * a, const void * b)
13169 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13170 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13172 if (amap->vma > bmap->vma)
13174 else if (amap->vma < bmap->vma)
13176 else if (amap->type > bmap->type)
13177 /* Ensure results do not depend on the host qsort for objects with
13178 multiple mapping symbols at the same address by sorting on type
13181 else if (amap->type < bmap->type)
13187 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13189 static unsigned long
13190 offset_prel31 (unsigned long addr, bfd_vma offset)
13192 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
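/* (Worked example: offset_prel31 (0x7ffffffc, 8) yields 0x00000004 -- the
   low 31 bits behave as a signed prel31 value (-4 + 8 = 4) while bit 31,
   which some consumers use as a flag, is preserved unchanged.)  */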
13195 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13199 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13201 unsigned long first_word = bfd_get_32 (output_bfd, from);
13202 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13204 /* High bit of first word is supposed to be zero. */
13205 if ((first_word & 0x80000000ul) == 0)
13206 first_word = offset_prel31 (first_word, offset);
13208 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
13209 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
13210 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13211 second_word = offset_prel31 (second_word, offset);
13213 bfd_put_32 (output_bfd, first_word, to);
13214 bfd_put_32 (output_bfd, second_word, to + 4);
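/* (Each .ARM.exidx entry is two words: a prel31 offset to the function it
   covers, then either EXIDX_CANTUNWIND (0x1), an inline unwind descriptor
   with the high bit set, or a prel31 offset to an .ARM.extab entry -- the
   three cases distinguished above.)  */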
13217 /* Data for make_branch_to_a8_stub(). */
13219 struct a8_branch_to_stub_data {
13220 asection *writing_section;
13221 bfd_byte *contents;
13225 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13226 places for a particular section. */
13229 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13232 struct elf32_arm_stub_hash_entry *stub_entry;
13233 struct a8_branch_to_stub_data *data;
13234 bfd_byte *contents;
13235 unsigned long branch_insn;
13236 bfd_vma veneered_insn_loc, veneer_entry_loc;
13237 bfd_signed_vma branch_offset;
13239 unsigned int target;
13241 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13242 data = (struct a8_branch_to_stub_data *) in_arg;
13244 if (stub_entry->target_section != data->writing_section
13245 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
13248 contents = data->contents;
13250 veneered_insn_loc = stub_entry->target_section->output_section->vma
13251 + stub_entry->target_section->output_offset
13252 + stub_entry->target_value;
13254 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13255 + stub_entry->stub_sec->output_offset
13256 + stub_entry->stub_offset;
13258 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13259 veneered_insn_loc &= ~3u;
13261 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13263 abfd = stub_entry->target_section->owner;
13264 target = stub_entry->target_value;
13266 /* We attempt to avoid this condition by setting stubs_always_after_branch
13267 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13268 This check is just to be on the safe side... */
13269 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13271 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13272 "allocated in unsafe location"), abfd);
13276 switch (stub_entry->stub_type)
13278 case arm_stub_a8_veneer_b:
13279 case arm_stub_a8_veneer_b_cond:
13280 branch_insn = 0xf0009000;
13283 case arm_stub_a8_veneer_blx:
13284 branch_insn = 0xf000e800;
13287 case arm_stub_a8_veneer_bl:
13289 unsigned int i1, j1, i2, j2, s;
13291 branch_insn = 0xf000d000;
13294 if (branch_offset < -16777216 || branch_offset > 16777214)
13296 /* There's not much we can do apart from complain if this
13298 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13299 "of range (input file too large)"), abfd);
13303 /* i1 = not(j1 eor s), so:
13305 j1 = (not i1) eor s. */
13307 branch_insn |= (branch_offset >> 1) & 0x7ff;
13308 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13309 i2 = (branch_offset >> 22) & 1;
13310 i1 = (branch_offset >> 23) & 1;
13311 s = (branch_offset >> 24) & 1;
13314 branch_insn |= j2 << 11;
13315 branch_insn |= j1 << 13;
13316 branch_insn |= s << 26;
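/* (For reference, this assembles the Thumb-2 wide-branch encoding: the
   25-bit offset is laid out as S:I1:I2:imm10:imm11:'0', with
   J1 = (NOT I1) EOR S and J2 = (NOT I2) EOR S.  In the combined 32-bit
   value built here S is bit 26, imm10 occupies bits 25-16, J1 bit 13,
   J2 bit 11 and imm11 bits 10-0, matching the shifts above.)  */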
13325 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
13326 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
13331 /* Do code byteswapping. Return FALSE afterwards so that the section is
13332 written out as normal. */
13335 elf32_arm_write_section (bfd *output_bfd,
13336 struct bfd_link_info *link_info,
13338 bfd_byte *contents)
13340 unsigned int mapcount, errcount;
13341 _arm_elf_section_data *arm_data;
13342 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13343 elf32_arm_section_map *map;
13344 elf32_vfp11_erratum_list *errnode;
13347 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13351 if (globals == NULL)
13354 /* If this section has not been allocated an _arm_elf_section_data
13355 structure then we cannot record anything. */
13356 arm_data = get_arm_elf_section_data (sec);
13357 if (arm_data == NULL)
13360 mapcount = arm_data->mapcount;
13361 map = arm_data->map;
13362 errcount = arm_data->erratumcount;
13366 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13368 for (errnode = arm_data->erratumlist; errnode != 0;
13369 errnode = errnode->next)
13371 bfd_vma target = errnode->vma - offset;
13373 switch (errnode->type)
13375 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13377 bfd_vma branch_to_veneer;
13378 /* Original condition code of instruction, plus bit mask for
13379 ARM B instruction. */
13380 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13383 /* The instruction is before the label. */
13386 /* Above offset included in -4 below. */
13387 branch_to_veneer = errnode->u.b.veneer->vma
13388 - errnode->vma - 4;
13390 if ((signed) branch_to_veneer < -(1 << 25)
13391 || (signed) branch_to_veneer >= (1 << 25))
13392 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13393 "range"), output_bfd);
13395 insn |= (branch_to_veneer >> 2) & 0xffffff;
13396 contents[endianflip ^ target] = insn & 0xff;
13397 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13398 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13399 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13403 case VFP11_ERRATUM_ARM_VENEER:
13405 bfd_vma branch_from_veneer;
13408 /* Take size of veneer into account. */
13409 branch_from_veneer = errnode->u.v.branch->vma
13410 - errnode->vma - 12;

                if ((signed) branch_from_veneer < -(1 << 25)
                    || (signed) branch_from_veneer >= (1 << 25))
                  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
                                           "range"), output_bfd);

                /* Original instruction.  */
                insn = errnode->u.v.branch->u.b.vfp_insn;
                contents[endianflip ^ target] = insn & 0xff;
                contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
                contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
                contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

                /* Branch back to insn after original insn.  */
                insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
                contents[endianflip ^ (target + 4)] = insn & 0xff;
                contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
                contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
                contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
              }
              break;

            default:
              abort ();
            }
        }
    }

  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
        = arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The
         original size (before we merged duplicate entries and inserted
         EXIDX_CANTUNWIND markers) was sec->rawsize.  (If we performed no
         edits, rawsize will be zero and we should use size instead.)  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;
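
      /* An .ARM.exidx entry is a pair of words: a prel31 offset to the code
         it describes, then either an inline unwind descriptor, a prel31
         pointer into .ARM.extab, or the special value 1 (EXIDX_CANTUNWIND).
         The loop below walks the original entries and the edit list in
         parallel: entries before the next edit are copied through with their
         offsets adjusted by add_to_offsets, DELETE_EXIDX_ENTRY edits skip an
         input entry, and INSERT_EXIDX_CANTUNWIND_AT_END edits emit a
         synthetic terminator for the linked text section.  */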
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
        {
          if (edit_node)
            {
              unsigned int edit_index = edit_node->index;

              if (in_index < edit_index && in_index * 8 < input_size)
                {
                  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
                                    contents + in_index * 8, add_to_offsets);
                  out_index++;
                  in_index++;
                }
              else if (in_index == edit_index
                       || (in_index * 8 >= input_size
                           && edit_index == UINT_MAX))
                {
                  switch (edit_node->type)
                    {
                    case DELETE_EXIDX_ENTRY:
                      in_index++;
                      add_to_offsets += 8;
                      break;

                    case INSERT_EXIDX_CANTUNWIND_AT_END:
                      {
                        asection *text_sec = edit_node->linked_section;
                        bfd_vma text_offset = text_sec->output_section->vma
                                              + text_sec->output_offset
                                              + text_sec->size;
                        bfd_vma exidx_offset = offset + out_index * 8;
                        unsigned long prel31_offset;

                        /* Note: this is meant to be equivalent to an
                           R_ARM_PREL31 relocation.  These synthetic
                           EXIDX_CANTUNWIND markers are not relocated by the
                           usual BFD method.  */
                        prel31_offset = (text_offset - exidx_offset)
                                        & 0x7ffffffful;

                        /* First address we can't unwind.  */
                        bfd_put_32 (output_bfd, prel31_offset,
                                    &edited_contents[out_index * 8]);

                        /* Code for EXIDX_CANTUNWIND.  */
                        bfd_put_32 (output_bfd, 0x1,
                                    &edited_contents[out_index * 8 + 4]);
                        out_index++;
                        add_to_offsets -= 8;
                      }
                      break;
                    }

                  edit_node = edit_node->next;
                }
            }
          else
            {
              /* No more edits, copy remaining entries verbatim.  */
              copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
                                contents + in_index * 8, add_to_offsets);
              out_index++;
              in_index++;
            }
        }

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
        bfd_set_section_contents (output_bfd, sec->output_section,
                                  edited_contents,
                                  (file_ptr) sec->output_offset, sec->size);

      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
                         &data);
    }

  if (mapcount == 0)
    return FALSE;

  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
        {
          if (i == mapcount - 1)
            end = sec->size;
          else
            end = map[i + 1].vma;
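
          /* map[] is built from the ARM ELF mapping symbols: '$a' starts a
             region of ARM code, '$t' Thumb code and '$d' literal data, so
             map[i].type is one of 'a', 't' or 'd' and only the code regions
             are byte swapped.  */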
          switch (map[i].type)
            {
            case 'a':
              /* Byte swap code words.  */
              while (ptr + 3 < end)
                {
                  tmp = contents[ptr];
                  contents[ptr] = contents[ptr + 3];
                  contents[ptr + 3] = tmp;
                  tmp = contents[ptr + 1];
                  contents[ptr + 1] = contents[ptr + 2];
                  contents[ptr + 2] = tmp;
                  ptr += 4;
                }
              break;

            case 't':
              /* Byte swap code halfwords.  */
              while (ptr + 1 < end)
                {
                  tmp = contents[ptr];
                  contents[ptr] = contents[ptr + 1];
                  contents[ptr + 1] = tmp;
                  ptr += 2;
                }
              break;

            case 'd':
              /* Leave data alone.  */
              break;
            }
          ptr = end;
        }
    }

  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}

/* Display STT_ARM_TFUNC symbols as functions.  */

static void
elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
                             asymbol *asym)
{
  elf_symbol_type *elfsym = (elf_symbol_type *) asym;

  if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
    elfsym->symbol.flags |= BSF_FUNCTION;
}

/* Mangle thumb function symbols as we read them in.  */

static bfd_boolean
elf32_arm_swap_symbol_in (bfd * abfd,
                          const void *psrc,
                          const void *pshn,
                          Elf_Internal_Sym *dst)
{
  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
    return FALSE;

  /* New EABI objects mark thumb function symbols by setting the low bit of
     the address.  Turn these into STT_ARM_TFUNC.  */
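  /* For example, a Thumb function at address 0x8000 appears in an EABI
     object as an STT_FUNC symbol with st_value 0x8001; internally BFD keeps
     it as STT_ARM_TFUNC with st_value 0x8000, and elf32_arm_swap_symbol_out
     below restores the EABI form on output.  */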
  if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
      && (dst->st_value & 1))
    {
      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
      dst->st_value &= ~(bfd_vma) 1;
    }

  return TRUE;
}

/* Mangle thumb function symbols as we write them out.  */

static void
elf32_arm_swap_symbol_out (bfd *abfd,
                           const Elf_Internal_Sym *src,
                           void *cdst,
                           void *shndx)
{
  Elf_Internal_Sym newsym;

  /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
     of the address set, as per the new EABI.  We do this unconditionally
     because objcopy does not set the ELF header flags until after
     it writes out the symbol table.  */
  if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
    {
      newsym = *src;
      newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
      if (newsym.st_shndx != SHN_UNDEF)
        {
          /* Do this only for defined symbols.  At link time, the static
             linker will simulate the work of the dynamic linker by resolving
             symbols and will carry over the Thumb-ness of the symbols it
             finds to the output symbol table.  It's not clear how this
             happens, but the Thumb-ness of undefined symbols can well be
             different at runtime, and writing '1' for them would be
             confusing for users and possibly for the dynamic linker itself.  */
          newsym.st_value |= 1;
        }

      src = &newsym;
    }

  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}

/* Add the PT_ARM_EXIDX program header.  */

static bfd_boolean
elf32_arm_modify_segment_map (bfd *abfd,
                              struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_segment_map *m;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    {
      /* If there is already a PT_ARM_EXIDX header, then we do not
         want to add another one.  This situation arises when running
         "strip"; the input binary already has the header.  */
      m = elf_tdata (abfd)->segment_map;
      while (m && m->p_type != PT_ARM_EXIDX)
        m = m->next;
      if (!m)
        {
          m = (struct elf_segment_map *)
              bfd_zalloc (abfd, sizeof (struct elf_segment_map));
          if (m == NULL)
            return FALSE;
          m->p_type = PT_ARM_EXIDX;
          m->count = 1;
          m->sections[0] = sec;

          m->next = elf_tdata (abfd)->segment_map;
          elf_tdata (abfd)->segment_map = m;
        }
    }

  return TRUE;
}

/* We may add a PT_ARM_EXIDX program header.  */

static int
elf32_arm_additional_program_headers (bfd *abfd,
                                      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    return 1;
  else
    return 0;
}

/* We have two function types: STT_FUNC and STT_ARM_TFUNC.  */

static bfd_boolean
elf32_arm_is_function_type (unsigned int type)
{
  return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
}

/* We use this to override swap_symbol_in and swap_symbol_out.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,
  1,
  32, 1,
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,
  elf32_arm_swap_symbol_out,
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};

#define ELF_ARCH                        bfd_arch_arm
#define ELF_TARGET_ID                   ARM_ELF_DATA
#define ELF_MACHINE_CODE                EM_ARM
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE                 0x1000
#else
#define ELF_MAXPAGESIZE                 0x8000
#endif
#define ELF_MINPAGESIZE                 0x1000
#define ELF_COMMONPAGESIZE              0x1000

#define bfd_elf32_mkobject                      elf32_arm_mkobject

#define bfd_elf32_bfd_copy_private_bfd_data     elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data    elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags         elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data    elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create    elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_free      elf32_arm_hash_table_free
#define bfd_elf32_bfd_reloc_type_lookup         elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup         elf32_arm_reloc_name_lookup
#define bfd_elf32_find_nearest_line             elf32_arm_find_nearest_line
#define bfd_elf32_find_inliner_info             elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook              elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol  elf32_arm_is_target_special_symbol
#define bfd_elf32_bfd_final_link                elf32_arm_final_link

#define elf_backend_get_symbol_type             elf32_arm_get_symbol_type
#define elf_backend_gc_mark_hook                elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections      elf32_arm_gc_mark_extra_sections
#define elf_backend_gc_sweep_hook               elf32_arm_gc_sweep_hook
#define elf_backend_check_relocs                elf32_arm_check_relocs
#define elf_backend_relocate_section            elf32_arm_relocate_section
#define elf_backend_write_section               elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol       elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections     elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol       elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections     elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections       elf32_arm_size_dynamic_sections
#define elf_backend_init_index_section          _bfd_elf_init_2_index_sections
#define elf_backend_post_process_headers        elf32_arm_post_process_headers
#define elf_backend_reloc_type_class            elf32_arm_reloc_type_class
#define elf_backend_object_p                    elf32_arm_object_p
#define elf_backend_section_flags               elf32_arm_section_flags
#define elf_backend_fake_sections               elf32_arm_fake_sections
#define elf_backend_section_from_shdr           elf32_arm_section_from_shdr
#define elf_backend_final_write_processing      elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol        elf32_arm_copy_indirect_symbol
#define elf_backend_symbol_processing           elf32_arm_symbol_processing
#define elf_backend_size_info                   elf32_arm_size_info
#define elf_backend_modify_segment_map          elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers  elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms      elf32_arm_output_arch_local_syms
#define elf_backend_begin_write_processing      elf32_arm_begin_write_processing
#define elf_backend_is_function_type            elf32_arm_is_function_type

#define elf_backend_can_refcount                1
#define elf_backend_can_gc_sections             1
#define elf_backend_plt_readonly                1
#define elf_backend_want_got_plt                1
#define elf_backend_want_plt_sym                0
#define elf_backend_may_use_rel_p               1
#define elf_backend_may_use_rela_p              0
#define elf_backend_default_use_rela_p          0
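
/* The first three GOT entries (12 bytes) are reserved: GOT[0] holds the
   address of the dynamic section and GOT[1]/GOT[2] are filled in by the
   dynamic linker for its own use.  */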
#define elf_backend_got_header_size     12

#undef  elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor            "aeabi"
#undef  elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section           ".ARM.attributes"
#undef  elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type          elf32_arm_obj_attrs_arg_type
#undef  elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type      SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order             elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown    elf32_arm_obj_attrs_handle_unknown

#include "elf32-target.h"

/* VxWorks Targets.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM               bfd_elf32_littlearm_vxworks_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME              "elf32-littlearm-vxworks"
#undef  TARGET_BIG_SYM
#define TARGET_BIG_SYM                  bfd_elf32_bigarm_vxworks_vec
#undef  TARGET_BIG_NAME
#define TARGET_BIG_NAME                 "elf32-bigarm-vxworks"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for VxWorks.  */

static struct bfd_link_hash_table *
elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
        = (struct elf32_arm_link_hash_table *) ret;
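      /* Mark the table as targeting VxWorks; among other things this
         selects the RELA-style dynamic relocations configured by the
         elf_backend_may_use_rela_p overrides below.  */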
      htab->use_rel = 0;
      htab->vxworks_p = 1;
    }
  return ret;
}

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}

#undef  elf32_bed
#define elf32_bed elf32_arm_vxworks_bed

#undef  bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create    elf32_arm_vxworks_link_hash_table_create
#undef  elf_backend_add_symbol_hook
#define elf_backend_add_symbol_hook             elf_vxworks_add_symbol_hook
#undef  elf_backend_final_write_processing
#define elf_backend_final_write_processing      elf32_arm_vxworks_final_write_processing
#undef  elf_backend_emit_relocs
#define elf_backend_emit_relocs                 elf_vxworks_emit_relocs

#undef  elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p               0
#undef  elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p              1
#undef  elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p          1
#undef  elf_backend_want_plt_sym
#define elf_backend_want_plt_sym                1
#undef  ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE                         0x1000

#include "elf32-target.h"

/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %B is already in final BE8 format"),
                          ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
         flags then do not bother setting the flags for the output
         architecture, instead allow future merges to do this.  If no
         future merges ever set these flags then they will retain their
         uninitialised values, which, surprise surprise, correspond
         to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
          && elf_elfheader (ibfd)->e_flags == 0)
        return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
          && bfd_get_arch_info (obfd)->the_default)
        return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
        {
          /* Ignore synthetic glue sections.  */
          if (strcmp (sec->name, ".glue_7")
              && strcmp (sec->name, ".glue_7t"))
            {
              if ((bfd_get_section_flags (ibfd, sec)
                   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
                  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
                only_data_sections = FALSE;

              null_input_bfd = FALSE;
              break;
            }
        }

      if (null_input_bfd || only_data_sections)
        return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
                                      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
        (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
         ibfd, obfd,
         (in_flags & EF_ARM_EABIMASK) >> 24,
         (out_flags & EF_ARM_EABIMASK) >> 24);
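      /* EF_ARM_EABIMASK selects the top byte of e_flags, so the shift by 24
         yields the EABI version number quoted in the message above.  */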
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
        {
          _bfd_error_handler
            (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
             ibfd, obfd,
             in_flags & EF_ARM_APCS_26 ? 26 : 32,
             out_flags & EF_ARM_APCS_26 ? 26 : 32);
          flags_compatible = FALSE;
        }

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
        {
          if (in_flags & EF_ARM_APCS_FLOAT)
            _bfd_error_handler
              (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
               ibfd, obfd);
          else
            _bfd_error_handler
              (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
               ibfd, obfd);

          flags_compatible = FALSE;
        }

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
        {
          if (in_flags & EF_ARM_VFP_FLOAT)
            _bfd_error_handler
              (_("error: %B uses VFP instructions, whereas %B does not"),
               ibfd, obfd);
          else
            _bfd_error_handler
              (_("error: %B uses FPA instructions, whereas %B does not"),
               ibfd, obfd);

          flags_compatible = FALSE;
        }

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
        {
          if (in_flags & EF_ARM_MAVERICK_FLOAT)
            _bfd_error_handler
              (_("error: %B uses Maverick instructions, whereas %B does not"),
               ibfd, obfd);
          else
            _bfd_error_handler
              (_("error: %B does not use Maverick instructions, whereas %B does"),
               ibfd, obfd);

          flags_compatible = FALSE;
        }

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
        {
          /* We can allow interworking between code that is VFP format
             layout, and uses either soft float or integer regs for
             passing floating point arguments and results.  We already
             know that the APCS_FLOAT flags match; similarly for VFP
             flags.  */
          if ((in_flags & EF_ARM_APCS_FLOAT) != 0
              || (in_flags & EF_ARM_VFP_FLOAT) == 0)
            {
              if (in_flags & EF_ARM_SOFT_FLOAT)
                _bfd_error_handler
                  (_("error: %B uses software FP, whereas %B uses hardware FP"),
                   ibfd, obfd);
              else
                _bfd_error_handler
                  (_("error: %B uses hardware FP, whereas %B uses software FP"),
                   ibfd, obfd);

              flags_compatible = FALSE;
            }
        }
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
        {
          if (in_flags & EF_ARM_INTERWORK)
            {
              _bfd_error_handler
                (_("Warning: %B supports interworking, whereas %B does not"),
                 ibfd, obfd);
            }
          else
            {
              _bfd_error_handler
                (_("Warning: %B does not support interworking, whereas %B does"),
                 ibfd, obfd);
            }
        }
    }

  return flags_compatible;
}

/* Symbian OS Targets.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM               bfd_elf32_littlearm_symbian_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME              "elf32-littlearm-symbian"
#undef  TARGET_BIG_SYM
#define TARGET_BIG_SYM                  bfd_elf32_bigarm_symbian_vec
#undef  TARGET_BIG_NAME
#define TARGET_BIG_NAME                 "elf32-bigarm-symbian"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for Symbian OS.  */

static struct bfd_link_hash_table *
elf32_arm_symbian_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
        = (struct elf32_arm_link_hash_table *) ret;
      /* There is no PLT header for Symbian OS.  */
      htab->plt_header_size = 0;
      /* The PLT entries are each one instruction and one word.  */
      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
      htab->symbian_p = 1;
      /* Symbian uses armv5t or above, so use_blx is always true.  */
      htab->use_blx = 1;
      htab->root.is_relocatable_executable = 1;
    }
  return ret;
}

static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
  { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
  { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
  /* These sections do not need to be writable as the SymbianOS
     post-linker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL,                                0, 0, 0,              0 }
};

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
                                          struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a post-linker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}

static bfd_boolean
elf32_arm_symbian_modify_segment_map (bfd *abfd,
                                      struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  asection *dynsec;

  /* BPABI shared libraries and executables should have a PT_DYNAMIC
     segment.  However, because the .dynamic section is not marked
     with SEC_LOAD, the generic ELF code will not create such a
     segment.  */
  dynsec = bfd_get_section_by_name (abfd, ".dynamic");
  if (dynsec)
    {
      for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
        if (m->p_type == PT_DYNAMIC)
          break;

      if (m == NULL)
        {
          m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
          m->next = elf_tdata (abfd)->segment_map;
          elf_tdata (abfd)->segment_map = m;
        }
    }

  /* Also call the generic arm routine.  */
  return elf32_arm_modify_segment_map (abfd, info);
}

/* Return address for Ith PLT stub in section PLT, for relocation REL
   or (bfd_vma) -1 if it should not be included.  */

static bfd_vma
elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
                               const arelent *rel ATTRIBUTE_UNUSED)
{
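  /* plt_header_size is zero on Symbian OS (see the hash table setup above),
     so the Ith stub starts exactly I entries past the start of the section;
     each entry is 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) bytes long.  */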
  return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
}

#undef  elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the post-linker
   will process them and then discard them.  */
#undef  ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

#undef  elf_backend_add_symbol_hook
#undef  elf_backend_emit_relocs

#undef  bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create    elf32_arm_symbian_link_hash_table_create
#undef  elf_backend_special_sections
#define elf_backend_special_sections            elf32_arm_symbian_special_sections
#undef  elf_backend_begin_write_processing
#define elf_backend_begin_write_processing      elf32_arm_symbian_begin_write_processing
#undef  elf_backend_final_write_processing
#define elf_backend_final_write_processing      elf32_arm_final_write_processing

#undef  elf_backend_modify_segment_map
#define elf_backend_modify_segment_map          elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef  elf_backend_got_header_size
#define elf_backend_got_header_size             0

/* Similarly, there is no .got.plt section.  */
#undef  elf_backend_want_got_plt
#define elf_backend_want_got_plt                0

#undef  elf_backend_plt_sym_val
#define elf_backend_plt_sym_val                 elf32_arm_symbian_plt_sym_val

#undef  elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p               1
#undef  elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p              0
#undef  elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p          0
#undef  elf_backend_want_plt_sym
#define elf_backend_want_plt_sym                0
#undef  ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE                         0x8000

#include "elf32-target.h"