1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
26 #include "bfd_stdint.h"
27 #include "libiberty.h"
31 #include "elf-vxworks.h"
34 /* Return the relocation section associated with NAME.  HTAB is the
35    bfd's elf32_arm_link_hash_table.  */
36 #define RELOC_SECTION(HTAB, NAME) \
37 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
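/* For example, RELOC_SECTION (htab, ".text") yields ".rel.text" when
   htab->use_rel is set and ".rela.text" otherwise: the adjacent string
   literals are concatenated at compile time and the ternary merely picks
   between the two results at run time.  */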
39 /* Return the size of a relocation entry.  HTAB is the bfd's
40    elf32_arm_link_hash_table.  */
41 #define RELOC_SIZE(HTAB) \
42   ((HTAB)->use_rel \
43    ? sizeof (Elf32_External_Rel) \
44    : sizeof (Elf32_External_Rela))
46 /* Return the function to swap relocations in.  HTAB is the bfd's
47    elf32_arm_link_hash_table.  */
48 #define SWAP_RELOC_IN(HTAB) \
49   ((HTAB)->use_rel \
50    ? bfd_elf32_swap_reloc_in \
51    : bfd_elf32_swap_reloca_in)
53 /* Return the function to swap relocations out.  HTAB is the bfd's
54    elf32_arm_link_hash_table.  */
55 #define SWAP_RELOC_OUT(HTAB) \
56   ((HTAB)->use_rel \
57    ? bfd_elf32_swap_reloc_out \
58    : bfd_elf32_swap_reloca_out)
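/* A sketch (not code from this file; htab and ext_ptr are placeholder
   names) of how the three helpers above are typically used together,
   stepping through external relocations in units of RELOC_SIZE:

     Elf_Internal_Rela irel;
     SWAP_RELOC_IN (htab) (abfd, ext_ptr, &irel);
     ...
     SWAP_RELOC_OUT (htab) (abfd, &irel, ext_ptr);
     ext_ptr += RELOC_SIZE (htab);

   so callers never need to know whether the BFD uses REL or RELA.  */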
60 #define elf_info_to_howto 0
61 #define elf_info_to_howto_rel elf32_arm_info_to_howto
63 #define ARM_ELF_ABI_VERSION 0
64 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
66 /* The Adjusted Place, as defined by AAELF. */
67 #define Pa(X) ((X) & 0xfffffffc)
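/* I.e. Pa just clears the low two bits, rounding an address down to a
   32-bit word boundary: Pa (0x8006) == 0x8004, Pa (0x8003) == 0x8000.  */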
69 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
70                                             struct bfd_link_info *link_info,
71                                             asection *sec,
72                                             bfd_byte *contents);
74 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
75    R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
76    in that slot.  */
78 static reloc_howto_type elf32_arm_howto_table_1[] =
81 HOWTO (R_ARM_NONE, /* type */
83 0, /* size (0 = byte, 1 = short, 2 = long) */
85 FALSE, /* pc_relative */
87 complain_overflow_dont,/* complain_on_overflow */
88 bfd_elf_generic_reloc, /* special_function */
89 "R_ARM_NONE", /* name */
90 FALSE, /* partial_inplace */
93 FALSE), /* pcrel_offset */
95 HOWTO (R_ARM_PC24, /* type */
97 2, /* size (0 = byte, 1 = short, 2 = long) */
99 TRUE, /* pc_relative */
101 complain_overflow_signed,/* complain_on_overflow */
102 bfd_elf_generic_reloc, /* special_function */
103 "R_ARM_PC24", /* name */
104 FALSE, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 TRUE), /* pcrel_offset */
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32, /* type */
112 2, /* size (0 = byte, 1 = short, 2 = long) */
114 FALSE, /* pc_relative */
116 complain_overflow_bitfield,/* complain_on_overflow */
117 bfd_elf_generic_reloc, /* special_function */
118 "R_ARM_ABS32", /* name */
119 FALSE, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 FALSE), /* pcrel_offset */
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32, /* type */
127 2, /* size (0 = byte, 1 = short, 2 = long) */
129 TRUE, /* pc_relative */
131 complain_overflow_bitfield,/* complain_on_overflow */
132 bfd_elf_generic_reloc, /* special_function */
133 "R_ARM_REL32", /* name */
134 FALSE, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 TRUE), /* pcrel_offset */
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0, /* type */
142 0, /* size (0 = byte, 1 = short, 2 = long) */
144 TRUE, /* pc_relative */
146 complain_overflow_dont,/* complain_on_overflow */
147 bfd_elf_generic_reloc, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 FALSE, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 TRUE), /* pcrel_offset */
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16, /* type */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
159 FALSE, /* pc_relative */
161 complain_overflow_bitfield,/* complain_on_overflow */
162 bfd_elf_generic_reloc, /* special_function */
163 "R_ARM_ABS16", /* name */
164 FALSE, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 FALSE), /* pcrel_offset */
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12, /* type */
172 2, /* size (0 = byte, 1 = short, 2 = long) */
174 FALSE, /* pc_relative */
176 complain_overflow_bitfield,/* complain_on_overflow */
177 bfd_elf_generic_reloc, /* special_function */
178 "R_ARM_ABS12", /* name */
179 FALSE, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 FALSE), /* pcrel_offset */
184 HOWTO (R_ARM_THM_ABS5, /* type */
186 1, /* size (0 = byte, 1 = short, 2 = long) */
188 FALSE, /* pc_relative */
190 complain_overflow_bitfield,/* complain_on_overflow */
191 bfd_elf_generic_reloc, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 FALSE, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 FALSE), /* pcrel_offset */
199 HOWTO (R_ARM_ABS8, /* type */
201 0, /* size (0 = byte, 1 = short, 2 = long) */
203 FALSE, /* pc_relative */
205 complain_overflow_bitfield,/* complain_on_overflow */
206 bfd_elf_generic_reloc, /* special_function */
207 "R_ARM_ABS8", /* name */
208 FALSE, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 FALSE), /* pcrel_offset */
213 HOWTO (R_ARM_SBREL32, /* type */
215 2, /* size (0 = byte, 1 = short, 2 = long) */
217 FALSE, /* pc_relative */
219 complain_overflow_dont,/* complain_on_overflow */
220 bfd_elf_generic_reloc, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 FALSE, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 FALSE), /* pcrel_offset */
227 HOWTO (R_ARM_THM_CALL, /* type */
229 2, /* size (0 = byte, 1 = short, 2 = long) */
231 TRUE, /* pc_relative */
233 complain_overflow_signed,/* complain_on_overflow */
234 bfd_elf_generic_reloc, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 FALSE, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 TRUE), /* pcrel_offset */
241 HOWTO (R_ARM_THM_PC8, /* type */
243 1, /* size (0 = byte, 1 = short, 2 = long) */
245 TRUE, /* pc_relative */
247 complain_overflow_signed,/* complain_on_overflow */
248 bfd_elf_generic_reloc, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 FALSE, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 TRUE), /* pcrel_offset */
255 HOWTO (R_ARM_BREL_ADJ, /* type */
257 1, /* size (0 = byte, 1 = short, 2 = long) */
259 FALSE, /* pc_relative */
261 complain_overflow_signed,/* complain_on_overflow */
262 bfd_elf_generic_reloc, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 FALSE, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 FALSE), /* pcrel_offset */
269 HOWTO (R_ARM_TLS_DESC, /* type */
271 2, /* size (0 = byte, 1 = short, 2 = long) */
273 FALSE, /* pc_relative */
275 complain_overflow_bitfield,/* complain_on_overflow */
276 bfd_elf_generic_reloc, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 FALSE, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 FALSE), /* pcrel_offset */
283 HOWTO (R_ARM_THM_SWI8, /* type */
285 0, /* size (0 = byte, 1 = short, 2 = long) */
287 FALSE, /* pc_relative */
289 complain_overflow_signed,/* complain_on_overflow */
290 bfd_elf_generic_reloc, /* special_function */
291 "R_ARM_SWI8", /* name */
292 FALSE, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 FALSE), /* pcrel_offset */
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25, /* type */
300 2, /* size (0 = byte, 1 = short, 2 = long) */
302 TRUE, /* pc_relative */
304 complain_overflow_signed,/* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_ARM_XPC25", /* name */
307 FALSE, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 TRUE), /* pcrel_offset */
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22, /* type */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
317 TRUE, /* pc_relative */
319 complain_overflow_signed,/* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 FALSE, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 TRUE), /* pcrel_offset */
327 /* Dynamic TLS relocations. */
329 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
331 2, /* size (0 = byte, 1 = short, 2 = long) */
333 FALSE, /* pc_relative */
335 complain_overflow_bitfield,/* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 TRUE, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 FALSE), /* pcrel_offset */
343 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
347 FALSE, /* pc_relative */
349 complain_overflow_bitfield,/* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 TRUE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
357 HOWTO (R_ARM_TLS_TPOFF32, /* type */
359 2, /* size (0 = byte, 1 = short, 2 = long) */
361 FALSE, /* pc_relative */
363 complain_overflow_bitfield,/* complain_on_overflow */
364 bfd_elf_generic_reloc, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 TRUE, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 FALSE), /* pcrel_offset */
371 /* Relocs used in ARM Linux */
373 HOWTO (R_ARM_COPY, /* type */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
377 FALSE, /* pc_relative */
379 complain_overflow_bitfield,/* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_ARM_COPY", /* name */
382 TRUE, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 FALSE), /* pcrel_offset */
387 HOWTO (R_ARM_GLOB_DAT, /* type */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
391 FALSE, /* pc_relative */
393 complain_overflow_bitfield,/* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 TRUE, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 FALSE), /* pcrel_offset */
401 HOWTO (R_ARM_JUMP_SLOT, /* type */
403 2, /* size (0 = byte, 1 = short, 2 = long) */
405 FALSE, /* pc_relative */
407 complain_overflow_bitfield,/* complain_on_overflow */
408 bfd_elf_generic_reloc, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 TRUE, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 FALSE), /* pcrel_offset */
415 HOWTO (R_ARM_RELATIVE, /* type */
417 2, /* size (0 = byte, 1 = short, 2 = long) */
419 FALSE, /* pc_relative */
421 complain_overflow_bitfield,/* complain_on_overflow */
422 bfd_elf_generic_reloc, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 TRUE, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 FALSE), /* pcrel_offset */
429 HOWTO (R_ARM_GOTOFF32, /* type */
431 2, /* size (0 = byte, 1 = short, 2 = long) */
433 FALSE, /* pc_relative */
435 complain_overflow_bitfield,/* complain_on_overflow */
436 bfd_elf_generic_reloc, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 TRUE, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 FALSE), /* pcrel_offset */
443 HOWTO (R_ARM_GOTPC, /* type */
445 2, /* size (0 = byte, 1 = short, 2 = long) */
447 TRUE, /* pc_relative */
449 complain_overflow_bitfield,/* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 TRUE, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 TRUE), /* pcrel_offset */
457 HOWTO (R_ARM_GOT32, /* type */
459 2, /* size (0 = byte, 1 = short, 2 = long) */
461 FALSE, /* pc_relative */
463 complain_overflow_bitfield,/* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 "R_ARM_GOT32", /* name */
466 TRUE, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 FALSE), /* pcrel_offset */
471 HOWTO (R_ARM_PLT32, /* type */
473 2, /* size (0 = byte, 1 = short, 2 = long) */
475 TRUE, /* pc_relative */
477 complain_overflow_bitfield,/* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_ARM_PLT32", /* name */
480 FALSE, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 TRUE), /* pcrel_offset */
485 HOWTO (R_ARM_CALL, /* type */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
489 TRUE, /* pc_relative */
491 complain_overflow_signed,/* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 "R_ARM_CALL", /* name */
494 FALSE, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 TRUE), /* pcrel_offset */
499 HOWTO (R_ARM_JUMP24, /* type */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
503 TRUE, /* pc_relative */
505 complain_overflow_signed,/* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 FALSE, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 TRUE), /* pcrel_offset */
513 HOWTO (R_ARM_THM_JUMP24, /* type */
515 2, /* size (0 = byte, 1 = short, 2 = long) */
517 TRUE, /* pc_relative */
519 complain_overflow_signed,/* complain_on_overflow */
520 bfd_elf_generic_reloc, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 FALSE, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 TRUE), /* pcrel_offset */
527 HOWTO (R_ARM_BASE_ABS, /* type */
529 2, /* size (0 = byte, 1 = short, 2 = long) */
531 FALSE, /* pc_relative */
533 complain_overflow_dont,/* complain_on_overflow */
534 bfd_elf_generic_reloc, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 FALSE, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 FALSE), /* pcrel_offset */
541 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
543 2, /* size (0 = byte, 1 = short, 2 = long) */
545 TRUE, /* pc_relative */
547 complain_overflow_dont,/* complain_on_overflow */
548 bfd_elf_generic_reloc, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 FALSE, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 TRUE), /* pcrel_offset */
555 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
557 2, /* size (0 = byte, 1 = short, 2 = long) */
559 TRUE, /* pc_relative */
561 complain_overflow_dont,/* complain_on_overflow */
562 bfd_elf_generic_reloc, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 FALSE, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 TRUE), /* pcrel_offset */
569 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
571 2, /* size (0 = byte, 1 = short, 2 = long) */
573 TRUE, /* pc_relative */
575 complain_overflow_dont,/* complain_on_overflow */
576 bfd_elf_generic_reloc, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 FALSE, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 TRUE), /* pcrel_offset */
583 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
585 2, /* size (0 = byte, 1 = short, 2 = long) */
587 FALSE, /* pc_relative */
589 complain_overflow_dont,/* complain_on_overflow */
590 bfd_elf_generic_reloc, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 FALSE, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 FALSE), /* pcrel_offset */
597 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
601 FALSE, /* pc_relative */
603 complain_overflow_dont,/* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 FALSE, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 FALSE), /* pcrel_offset */
611 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
615 FALSE, /* pc_relative */
617 complain_overflow_dont,/* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 FALSE, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 FALSE), /* pcrel_offset */
625 HOWTO (R_ARM_TARGET1, /* type */
627 2, /* size (0 = byte, 1 = short, 2 = long) */
629 FALSE, /* pc_relative */
631 complain_overflow_dont,/* complain_on_overflow */
632 bfd_elf_generic_reloc, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 FALSE, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 FALSE), /* pcrel_offset */
639 HOWTO (R_ARM_ROSEGREL32, /* type */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
643 FALSE, /* pc_relative */
645 complain_overflow_dont,/* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 FALSE, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 FALSE), /* pcrel_offset */
653 HOWTO (R_ARM_V4BX, /* type */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
657 FALSE, /* pc_relative */
659 complain_overflow_dont,/* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 "R_ARM_V4BX", /* name */
662 FALSE, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 FALSE), /* pcrel_offset */
667 HOWTO (R_ARM_TARGET2, /* type */
669 2, /* size (0 = byte, 1 = short, 2 = long) */
671 FALSE, /* pc_relative */
673 complain_overflow_signed,/* complain_on_overflow */
674 bfd_elf_generic_reloc, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 FALSE, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 TRUE), /* pcrel_offset */
681 HOWTO (R_ARM_PREL31, /* type */
683 2, /* size (0 = byte, 1 = short, 2 = long) */
685 TRUE, /* pc_relative */
687 complain_overflow_signed,/* complain_on_overflow */
688 bfd_elf_generic_reloc, /* special_function */
689 "R_ARM_PREL31", /* name */
690 FALSE, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 TRUE), /* pcrel_offset */
695 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
697 2, /* size (0 = byte, 1 = short, 2 = long) */
699 FALSE, /* pc_relative */
701 complain_overflow_dont,/* complain_on_overflow */
702 bfd_elf_generic_reloc, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 FALSE, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 FALSE), /* pcrel_offset */
709 HOWTO (R_ARM_MOVT_ABS, /* type */
711 2, /* size (0 = byte, 1 = short, 2 = long) */
713 FALSE, /* pc_relative */
715 complain_overflow_bitfield,/* complain_on_overflow */
716 bfd_elf_generic_reloc, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 FALSE, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 FALSE), /* pcrel_offset */
723 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
725 2, /* size (0 = byte, 1 = short, 2 = long) */
727 TRUE, /* pc_relative */
729 complain_overflow_dont,/* complain_on_overflow */
730 bfd_elf_generic_reloc, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 FALSE, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 TRUE), /* pcrel_offset */
737 HOWTO (R_ARM_MOVT_PREL, /* type */
739 2, /* size (0 = byte, 1 = short, 2 = long) */
741 TRUE, /* pc_relative */
743 complain_overflow_bitfield,/* complain_on_overflow */
744 bfd_elf_generic_reloc, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 FALSE, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 TRUE), /* pcrel_offset */
751 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
755 FALSE, /* pc_relative */
757 complain_overflow_dont,/* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 FALSE, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 FALSE), /* pcrel_offset */
765 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
767 2, /* size (0 = byte, 1 = short, 2 = long) */
769 FALSE, /* pc_relative */
771 complain_overflow_bitfield,/* complain_on_overflow */
772 bfd_elf_generic_reloc, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 FALSE, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 FALSE), /* pcrel_offset */
779 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
783 TRUE, /* pc_relative */
785 complain_overflow_dont,/* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 FALSE, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 TRUE), /* pcrel_offset */
793 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
795 2, /* size (0 = byte, 1 = short, 2 = long) */
797 TRUE, /* pc_relative */
799 complain_overflow_bitfield,/* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 FALSE, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 TRUE), /* pcrel_offset */
807 HOWTO (R_ARM_THM_JUMP19, /* type */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
811 TRUE, /* pc_relative */
813 complain_overflow_signed,/* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 FALSE, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 TRUE), /* pcrel_offset */
821 HOWTO (R_ARM_THM_JUMP6, /* type */
823 1, /* size (0 = byte, 1 = short, 2 = long) */
825 TRUE, /* pc_relative */
827 complain_overflow_unsigned,/* complain_on_overflow */
828 bfd_elf_generic_reloc, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 FALSE, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 TRUE), /* pcrel_offset */
835 /* These are declared as 13-bit signed relocations because we can
836    address -4095 .. 4095 (base) by altering ADDW to SUBW or vice
837    versa.  */
838 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
842 TRUE, /* pc_relative */
844 complain_overflow_dont,/* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 FALSE, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 TRUE), /* pcrel_offset */
852 HOWTO (R_ARM_THM_PC12, /* type */
854 2, /* size (0 = byte, 1 = short, 2 = long) */
856 TRUE, /* pc_relative */
858 complain_overflow_dont,/* complain_on_overflow */
859 bfd_elf_generic_reloc, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 FALSE, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 TRUE), /* pcrel_offset */
866 HOWTO (R_ARM_ABS32_NOI, /* type */
868 2, /* size (0 = byte, 1 = short, 2 = long) */
870 FALSE, /* pc_relative */
872 complain_overflow_dont,/* complain_on_overflow */
873 bfd_elf_generic_reloc, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 FALSE, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 FALSE), /* pcrel_offset */
880 HOWTO (R_ARM_REL32_NOI, /* type */
882 2, /* size (0 = byte, 1 = short, 2 = long) */
884 TRUE, /* pc_relative */
886 complain_overflow_dont,/* complain_on_overflow */
887 bfd_elf_generic_reloc, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 FALSE, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 FALSE), /* pcrel_offset */
894 /* Group relocations. */
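/* Roughly, following the AAELF group-relocation scheme: a large offset is
   built up by several instructions, each carrying one "group" of the
   value.  A G0 relocation encodes the most significant chunk that fits
   the instruction's immediate field, G1 encodes the next chunk of the
   residual left over after G0, and G2 the residual after G1.  The _NC
   ("no check") variants skip the overflow check because further groups
   are expected to follow.  */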
896 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
900 TRUE, /* pc_relative */
902 complain_overflow_dont,/* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 FALSE, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 TRUE), /* pcrel_offset */
910 HOWTO (R_ARM_ALU_PC_G0, /* type */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
914 TRUE, /* pc_relative */
916 complain_overflow_dont,/* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 FALSE, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 TRUE), /* pcrel_offset */
924 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
928 TRUE, /* pc_relative */
930 complain_overflow_dont,/* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 FALSE, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 TRUE), /* pcrel_offset */
938 HOWTO (R_ARM_ALU_PC_G1, /* type */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
942 TRUE, /* pc_relative */
944 complain_overflow_dont,/* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 FALSE, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 TRUE), /* pcrel_offset */
952 HOWTO (R_ARM_ALU_PC_G2, /* type */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
956 TRUE, /* pc_relative */
958 complain_overflow_dont,/* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 FALSE, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 TRUE), /* pcrel_offset */
966 HOWTO (R_ARM_LDR_PC_G1, /* type */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
970 TRUE, /* pc_relative */
972 complain_overflow_dont,/* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 FALSE, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 TRUE), /* pcrel_offset */
980 HOWTO (R_ARM_LDR_PC_G2, /* type */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
984 TRUE, /* pc_relative */
986 complain_overflow_dont,/* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 FALSE, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 TRUE), /* pcrel_offset */
994 HOWTO (R_ARM_LDRS_PC_G0, /* type */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
998 TRUE, /* pc_relative */
1000 complain_overflow_dont,/* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 FALSE, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 TRUE), /* pcrel_offset */
1008 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1012 TRUE, /* pc_relative */
1014 complain_overflow_dont,/* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 FALSE, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 TRUE), /* pcrel_offset */
1022 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1026 TRUE, /* pc_relative */
1028 complain_overflow_dont,/* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 TRUE), /* pcrel_offset */
1036 HOWTO (R_ARM_LDC_PC_G0, /* type */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1040 TRUE, /* pc_relative */
1042 complain_overflow_dont,/* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 TRUE), /* pcrel_offset */
1050 HOWTO (R_ARM_LDC_PC_G1, /* type */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1054 TRUE, /* pc_relative */
1056 complain_overflow_dont,/* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 FALSE, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 TRUE), /* pcrel_offset */
1064 HOWTO (R_ARM_LDC_PC_G2, /* type */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1068 TRUE, /* pc_relative */
1070 complain_overflow_dont,/* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 FALSE, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 TRUE), /* pcrel_offset */
1078 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1082 TRUE, /* pc_relative */
1084 complain_overflow_dont,/* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 FALSE, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 TRUE), /* pcrel_offset */
1092 HOWTO (R_ARM_ALU_SB_G0, /* type */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1096 TRUE, /* pc_relative */
1098 complain_overflow_dont,/* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1106 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1110 TRUE, /* pc_relative */
1112 complain_overflow_dont,/* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1120 HOWTO (R_ARM_ALU_SB_G1, /* type */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1124 TRUE, /* pc_relative */
1126 complain_overflow_dont,/* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 TRUE), /* pcrel_offset */
1134 HOWTO (R_ARM_ALU_SB_G2, /* type */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1138 TRUE, /* pc_relative */
1140 complain_overflow_dont,/* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 TRUE), /* pcrel_offset */
1148 HOWTO (R_ARM_LDR_SB_G0, /* type */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1152 TRUE, /* pc_relative */
1154 complain_overflow_dont,/* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 TRUE), /* pcrel_offset */
1162 HOWTO (R_ARM_LDR_SB_G1, /* type */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1166 TRUE, /* pc_relative */
1168 complain_overflow_dont,/* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 FALSE, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 TRUE), /* pcrel_offset */
1176 HOWTO (R_ARM_LDR_SB_G2, /* type */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1180 TRUE, /* pc_relative */
1182 complain_overflow_dont,/* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 FALSE, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 TRUE), /* pcrel_offset */
1190 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1194 TRUE, /* pc_relative */
1196 complain_overflow_dont,/* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 FALSE, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 TRUE), /* pcrel_offset */
1204 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1206 2, /* size (0 = byte, 1 = short, 2 = long) */
1208 TRUE, /* pc_relative */
1210 complain_overflow_dont,/* complain_on_overflow */
1211 bfd_elf_generic_reloc, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 FALSE, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 TRUE), /* pcrel_offset */
1218 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1220 2, /* size (0 = byte, 1 = short, 2 = long) */
1222 TRUE, /* pc_relative */
1224 complain_overflow_dont,/* complain_on_overflow */
1225 bfd_elf_generic_reloc, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 FALSE, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 TRUE), /* pcrel_offset */
1232 HOWTO (R_ARM_LDC_SB_G0, /* type */
1234 2, /* size (0 = byte, 1 = short, 2 = long) */
1236 TRUE, /* pc_relative */
1238 complain_overflow_dont,/* complain_on_overflow */
1239 bfd_elf_generic_reloc, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 FALSE, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 TRUE), /* pcrel_offset */
1246 HOWTO (R_ARM_LDC_SB_G1, /* type */
1248 2, /* size (0 = byte, 1 = short, 2 = long) */
1250 TRUE, /* pc_relative */
1252 complain_overflow_dont,/* complain_on_overflow */
1253 bfd_elf_generic_reloc, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 FALSE, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 TRUE), /* pcrel_offset */
1260 HOWTO (R_ARM_LDC_SB_G2, /* type */
1262 2, /* size (0 = byte, 1 = short, 2 = long) */
1264 TRUE, /* pc_relative */
1266 complain_overflow_dont,/* complain_on_overflow */
1267 bfd_elf_generic_reloc, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 FALSE, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 TRUE), /* pcrel_offset */
1274 /* End of group relocations. */
1276 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1278 2, /* size (0 = byte, 1 = short, 2 = long) */
1280 FALSE, /* pc_relative */
1282 complain_overflow_dont,/* complain_on_overflow */
1283 bfd_elf_generic_reloc, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 FALSE, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 FALSE), /* pcrel_offset */
1290 HOWTO (R_ARM_MOVT_BREL, /* type */
1292 2, /* size (0 = byte, 1 = short, 2 = long) */
1294 FALSE, /* pc_relative */
1296 complain_overflow_bitfield,/* complain_on_overflow */
1297 bfd_elf_generic_reloc, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 FALSE, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 FALSE), /* pcrel_offset */
1304 HOWTO (R_ARM_MOVW_BREL, /* type */
1306 2, /* size (0 = byte, 1 = short, 2 = long) */
1308 FALSE, /* pc_relative */
1310 complain_overflow_dont,/* complain_on_overflow */
1311 bfd_elf_generic_reloc, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 FALSE, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 FALSE), /* pcrel_offset */
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1320 2, /* size (0 = byte, 1 = short, 2 = long) */
1322 FALSE, /* pc_relative */
1324 complain_overflow_dont,/* complain_on_overflow */
1325 bfd_elf_generic_reloc, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 FALSE, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 FALSE), /* pcrel_offset */
1332 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1334 2, /* size (0 = byte, 1 = short, 2 = long) */
1336 FALSE, /* pc_relative */
1338 complain_overflow_bitfield,/* complain_on_overflow */
1339 bfd_elf_generic_reloc, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 FALSE, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 FALSE), /* pcrel_offset */
1346 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1348 2, /* size (0 = byte, 1 = short, 2 = long) */
1350 FALSE, /* pc_relative */
1352 complain_overflow_dont,/* complain_on_overflow */
1353 bfd_elf_generic_reloc, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 FALSE, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 FALSE), /* pcrel_offset */
1360 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 FALSE, /* pc_relative */
1366 complain_overflow_bitfield,/* complain_on_overflow */
1367 NULL, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 TRUE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1374 HOWTO (R_ARM_TLS_CALL, /* type */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 FALSE, /* pc_relative */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 FALSE, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1388 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1392 FALSE, /* pc_relative */
1394 complain_overflow_bitfield,/* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 FALSE, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 FALSE), /* pcrel_offset */
1402 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1406 FALSE, /* pc_relative */
1408 complain_overflow_dont,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 FALSE, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1416 HOWTO (R_ARM_PLT32_ABS, /* type */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1420 FALSE, /* pc_relative */
1422 complain_overflow_dont,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 FALSE, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1430 HOWTO (R_ARM_GOT_ABS, /* type */
1432 2, /* size (0 = byte, 1 = short, 2 = long) */
1434 FALSE, /* pc_relative */
1436 complain_overflow_dont,/* complain_on_overflow */
1437 bfd_elf_generic_reloc, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 FALSE, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 FALSE), /* pcrel_offset */
1444 HOWTO (R_ARM_GOT_PREL, /* type */
1446 2, /* size (0 = byte, 1 = short, 2 = long) */
1448 TRUE, /* pc_relative */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 FALSE, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 TRUE), /* pcrel_offset */
1458 HOWTO (R_ARM_GOT_BREL12, /* type */
1460 2, /* size (0 = byte, 1 = short, 2 = long) */
1462 FALSE, /* pc_relative */
1464 complain_overflow_bitfield,/* complain_on_overflow */
1465 bfd_elf_generic_reloc, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 FALSE, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 FALSE), /* pcrel_offset */
1472 HOWTO (R_ARM_GOTOFF12, /* type */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1476 FALSE, /* pc_relative */
1478 complain_overflow_bitfield,/* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 FALSE, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 FALSE), /* pcrel_offset */
1486 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1491 2, /* size (0 = byte, 1 = short, 2 = long) */
1493 FALSE, /* pc_relative */
1495 complain_overflow_dont, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 FALSE, /* partial_inplace */
1501 FALSE), /* pcrel_offset */
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1506 2, /* size (0 = byte, 1 = short, 2 = long) */
1508 FALSE, /* pc_relative */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 NULL, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 FALSE, /* partial_inplace */
1516 FALSE), /* pcrel_offset */
1518 HOWTO (R_ARM_THM_JUMP11, /* type */
1520 1, /* size (0 = byte, 1 = short, 2 = long) */
1522 TRUE, /* pc_relative */
1524 complain_overflow_signed, /* complain_on_overflow */
1525 bfd_elf_generic_reloc, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 FALSE, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 TRUE), /* pcrel_offset */
1532 HOWTO (R_ARM_THM_JUMP8, /* type */
1534 1, /* size (0 = byte, 1 = short, 2 = long) */
1536 TRUE, /* pc_relative */
1538 complain_overflow_signed, /* complain_on_overflow */
1539 bfd_elf_generic_reloc, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 FALSE, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 TRUE), /* pcrel_offset */
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32, /* type */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1551 FALSE, /* pc_relative */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 NULL, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDM32, /* type */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1565 FALSE, /* pc_relative */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 TRUE, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LDO32, /* type */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1579 FALSE, /* pc_relative */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 TRUE, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE32, /* type */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1593 FALSE, /* pc_relative */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 NULL, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 TRUE, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1603 HOWTO (R_ARM_TLS_LE32, /* type */
1605 2, /* size (0 = byte, 1 = short, 2 = long) */
1607 FALSE, /* pc_relative */
1609 complain_overflow_bitfield,/* complain_on_overflow */
1610 bfd_elf_generic_reloc, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 TRUE, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 FALSE), /* pcrel_offset */
1617 HOWTO (R_ARM_TLS_LDO12, /* type */
1619 2, /* size (0 = byte, 1 = short, 2 = long) */
1621 FALSE, /* pc_relative */
1623 complain_overflow_bitfield,/* complain_on_overflow */
1624 bfd_elf_generic_reloc, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 FALSE, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 FALSE), /* pcrel_offset */
1631 HOWTO (R_ARM_TLS_LE12, /* type */
1633 2, /* size (0 = byte, 1 = short, 2 = long) */
1635 FALSE, /* pc_relative */
1637 complain_overflow_bitfield,/* complain_on_overflow */
1638 bfd_elf_generic_reloc, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 FALSE, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 FALSE), /* pcrel_offset */
1645 HOWTO (R_ARM_TLS_IE12GP, /* type */
1647 2, /* size (0 = byte, 1 = short, 2 = long) */
1649 FALSE, /* pc_relative */
1651 complain_overflow_bitfield,/* complain_on_overflow */
1652 bfd_elf_generic_reloc, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 FALSE, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 FALSE), /* pcrel_offset */
1659 /* 112-127 private relocations. */
1677 /* R_ARM_ME_TOO, obsolete. */
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1682 1, /* size (0 = byte, 1 = short, 2 = long) */
1684 FALSE, /* pc_relative */
1686 complain_overflow_bitfield,/* complain_on_overflow */
1687 bfd_elf_generic_reloc, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 FALSE, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 FALSE), /* pcrel_offset */
1696 static reloc_howto_type elf32_arm_howto_table_2[1] =
1698 HOWTO (R_ARM_IRELATIVE, /* type */
1700 2, /* size (0 = byte, 1 = short, 2 = long) */
1702 FALSE, /* pc_relative */
1704 complain_overflow_bitfield,/* complain_on_overflow */
1705 bfd_elf_generic_reloc, /* special_function */
1706 "R_ARM_IRELATIVE", /* name */
1707 TRUE, /* partial_inplace */
1708 0xffffffff, /* src_mask */
1709 0xffffffff, /* dst_mask */
1710 FALSE) /* pcrel_offset */
1713 /* 249-255 extended, currently unused, relocations: */
1714 static reloc_howto_type elf32_arm_howto_table_3[4] =
1716 HOWTO (R_ARM_RREL32, /* type */
1718 0, /* size (0 = byte, 1 = short, 2 = long) */
1720 FALSE, /* pc_relative */
1722 complain_overflow_dont,/* complain_on_overflow */
1723 bfd_elf_generic_reloc, /* special_function */
1724 "R_ARM_RREL32", /* name */
1725 FALSE, /* partial_inplace */
1728 FALSE), /* pcrel_offset */
1730 HOWTO (R_ARM_RABS32, /* type */
1732 0, /* size (0 = byte, 1 = short, 2 = long) */
1734 FALSE, /* pc_relative */
1736 complain_overflow_dont,/* complain_on_overflow */
1737 bfd_elf_generic_reloc, /* special_function */
1738 "R_ARM_RABS32", /* name */
1739 FALSE, /* partial_inplace */
1742 FALSE), /* pcrel_offset */
1744 HOWTO (R_ARM_RPC24, /* type */
1746 0, /* size (0 = byte, 1 = short, 2 = long) */
1748 FALSE, /* pc_relative */
1750 complain_overflow_dont,/* complain_on_overflow */
1751 bfd_elf_generic_reloc, /* special_function */
1752 "R_ARM_RPC24", /* name */
1753 FALSE, /* partial_inplace */
1756 FALSE), /* pcrel_offset */
1758 HOWTO (R_ARM_RBASE, /* type */
1760 0, /* size (0 = byte, 1 = short, 2 = long) */
1762 FALSE, /* pc_relative */
1764 complain_overflow_dont,/* complain_on_overflow */
1765 bfd_elf_generic_reloc, /* special_function */
1766 "R_ARM_RBASE", /* name */
1767 FALSE, /* partial_inplace */
1770 FALSE) /* pcrel_offset */
1773 static reloc_howto_type *
1774 elf32_arm_howto_from_type (unsigned int r_type)
1776 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1777 return &elf32_arm_howto_table_1[r_type];
1779 if (r_type == R_ARM_IRELATIVE)
1780 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1782 if (r_type >= R_ARM_RREL32
1783 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1784 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1790 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1791 Elf_Internal_Rela * elf_reloc)
1793 unsigned int r_type;
1795 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1796 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1799 struct elf32_arm_reloc_map
1801 bfd_reloc_code_real_type bfd_reloc_val;
1802 unsigned char elf_reloc_val;
1805 /* All entries in this list must also be present in elf32_arm_howto_table. */
1806 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1808 {BFD_RELOC_NONE, R_ARM_NONE},
1809 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1810 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1811 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1812 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1813 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1814 {BFD_RELOC_32, R_ARM_ABS32},
1815 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1816 {BFD_RELOC_8, R_ARM_ABS8},
1817 {BFD_RELOC_16, R_ARM_ABS16},
1818 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1819 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1820 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1821 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1822 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1823 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1824 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1825 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1826 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1827 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1828 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1829 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1830 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1831 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1832 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1833 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1834 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1835 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1836 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1837 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1838 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1839 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1840 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1841 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1842 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1843 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1844 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1845 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1846 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1847 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1848 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1849 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1850 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1851 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1852 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1853 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1854 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1855 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1856 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1857 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1858 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1859 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1860 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1861 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1862 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1863 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1864 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1865 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1866 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1867 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1868 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1869 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1870 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1871 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1872 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1873 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1874 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1875 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1876 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1877 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1878 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1879 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1880 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1881 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1882 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1883 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1884 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1885 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1886 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1887 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1888 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1889 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1890 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1891 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1892 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1893 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1896 static reloc_howto_type *
1897 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1898 bfd_reloc_code_real_type code)
1902 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1903 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1904 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1909 static reloc_howto_type *
1910 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1911                              const char *r_name)
1915 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1916 if (elf32_arm_howto_table_1[i].name != NULL
1917 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1918 return &elf32_arm_howto_table_1[i];
1920 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1921 if (elf32_arm_howto_table_2[i].name != NULL
1922 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1923 return &elf32_arm_howto_table_2[i];
1925 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1926 if (elf32_arm_howto_table_3[i].name != NULL
1927 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1928 return &elf32_arm_howto_table_3[i];
1933 /* Support for core dump NOTE sections. */
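/* The layouts assumed below match 32-bit Linux/ARM core files: a
   148-byte NT_PRSTATUS descriptor with the signal at offset 12, the
   LWP id at offset 24 and 72 bytes of general registers at offset 72,
   and a 124-byte NT_PRPSINFO descriptor with the pid at offset 12, the
   program name at offset 28 (16 bytes) and the argument string at
   offset 44 (80 bytes).  */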
1936 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1941 switch (note->descsz)
1946 case 148: /* Linux/ARM 32-bit. */
1948 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1951 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
1960 /* Make a ".reg/999" section. */
1961 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1962 size, note->descpos + offset);
1966 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1968 switch (note->descsz)
1973 case 124: /* Linux/ARM elf_prpsinfo. */
1974 elf_tdata (abfd)->core_pid
1975 = bfd_get_32 (abfd, note->descdata + 12);
1976 elf_tdata (abfd)->core_program
1977 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1978 elf_tdata (abfd)->core_command
1979 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1982 /* Note that for some reason, a spurious space is tacked
1983    onto the end of the args in some implementations (at least one,
1984    anyway), so strip it off if it exists.  */
1986 char *command = elf_tdata (abfd)->core_command;
1987 int n = strlen (command);
1989 if (0 < n && command[n - 1] == ' ')
1990 command[n - 1] = '\0';
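/* The writer below performs the inverse of the two functions above: it
   zero-fills a descriptor of the appropriate kind and stores only the
   fields the readers consume (program name and arguments for prpsinfo;
   pid, signal and the 72-byte register block for prstatus) before
   handing it to elfcore_write_note.  */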
1997 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2010 va_start (ap, note_type);
2011 memset (data, 0, sizeof (data));
2012 strncpy (data + 28, va_arg (ap, const char *), 16);
2013 strncpy (data + 44, va_arg (ap, const char *), 80);
2016 return elfcore_write_note (abfd, buf, bufsiz,
2017 "CORE", note_type, data, sizeof (data));
2028 va_start (ap, note_type);
2029 memset (data, 0, sizeof (data));
2030 pid = va_arg (ap, long);
2031 bfd_put_32 (abfd, pid, data + 24);
2032 cursig = va_arg (ap, int);
2033 bfd_put_16 (abfd, cursig, data + 12);
2034 greg = va_arg (ap, const void *);
2035 memcpy (data + 72, greg, 72);
2038 return elfcore_write_note (abfd, buf, bufsiz,
2039 "CORE", note_type, data, sizeof (data));
2044 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
2045 #define TARGET_LITTLE_NAME "elf32-littlearm"
2046 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
2047 #define TARGET_BIG_NAME "elf32-bigarm"
2049 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2050 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2051 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2053 typedef unsigned long int insn32;
2054 typedef unsigned short int insn16;
2056 /* In lieu of proper flags, assume all EABIv4 or later objects are
2057    interworkable.  */
2058 #define INTERWORK_FLAG(abfd) \
2059 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2060 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2061 || ((abfd)->flags & BFD_LINKER_CREATED))
2063 /* The linker script knows the section names for placement.
2064 The entry_names are used to do simple name mangling on the stubs.
2065 Given a function name and its type, the stub can be found. The
2066 name can be changed. The only requirement is that the %s be present. */
2067 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2068 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2070 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2071 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2073 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2074 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2076 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2077 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2079 #define STUB_ENTRY_NAME "__%s_veneer"
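/* A minimal sketch (not part of the glue machinery itself) of how the
   entry-name templates above are meant to be used: the %s is filled in
   with the function name, printf-style.  The buffer size and the name
   "foo" are invented for the example:

     char name[64];
     snprintf (name, sizeof name, ARM2THUMB_GLUE_ENTRY_NAME, "foo");

   which leaves "__foo_from_arm" in NAME; likewise
   THUMB2ARM_GLUE_ENTRY_NAME gives "__foo_from_thumb" and
   STUB_ENTRY_NAME gives "__foo_veneer".  */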
2081 /* The name of the dynamic interpreter. This is put in the .interp
2083 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2085 static const unsigned long tls_trampoline [] =
2087 0xe08e0000, /* add r0, lr, r0 */
2088 0xe5901004, /* ldr r1, [r0,#4] */
2089 0xe12fff11, /* bx r1 */
2092 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2094 0xe52d2004, /* push {r2} */
2095 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2096 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2097 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2098 0xe081100f, /* 2: add r1, pc */
2099 0xe12fff12, /* bx r2 */
2100 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2101 + dl_tlsdesc_lazy_resolver(GOT) */
2102 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
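/* A worked reading of the PC-relative arithmetic above (illustrative
   only, following the comments): in ARM state the PC reads as the
   address of the current instruction plus 8.  The word at 3: holds
   _GLOBAL_OFFSET_TABLE_ - 1b - 8 plus the resolver's offset within the
   GOT, so the load at 1: accesses

     (1b + 8) + (GOT - 1b - 8 + offset) = GOT + offset

   i.e. the GOT slot holding the lazy resolver, whose value ends up in
   r2 for the bx.  Likewise the word at 4: (GOT - 2b - 8) makes the add
   at 2: leave r1 = _GLOBAL_OFFSET_TABLE_ for the resolver's use.  */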
2105 #ifdef FOUR_WORD_PLT
2107 /* The first entry in a procedure linkage table looks like
2108 this. It is set up so that any shared library function that is
2109 called before the relocation has been set up calls the dynamic
2111 static const bfd_vma elf32_arm_plt0_entry [] =
2113 0xe52de004, /* str lr, [sp, #-4]! */
2114 0xe59fe010, /* ldr lr, [pc, #16] */
2115 0xe08fe00e, /* add lr, pc, lr */
2116 0xe5bef008, /* ldr pc, [lr, #8]! */
2119 /* Subsequent entries in a procedure linkage table look like
2121 static const bfd_vma elf32_arm_plt_entry [] =
2123 0xe28fc600, /* add ip, pc, #NN */
2124 0xe28cca00, /* add ip, ip, #NN */
2125 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2126 0x00000000, /* unused */
2131 /* The first entry in a procedure linkage table looks like
2132 this. It is set up so that any shared library function that is
2133 called before the relocation has been set up calls the dynamic
2135 static const bfd_vma elf32_arm_plt0_entry [] =
2137 0xe52de004, /* str lr, [sp, #-4]! */
2138 0xe59fe004, /* ldr lr, [pc, #4] */
2139 0xe08fe00e, /* add lr, pc, lr */
2140 0xe5bef008, /* ldr pc, [lr, #8]! */
2141 0x00000000, /* &GOT[0] - . */
2144 /* Subsequent entries in a procedure linkage table look like
2146 static const bfd_vma elf32_arm_plt_entry [] =
2148 0xe28fc600, /* add ip, pc, #0xNN00000 */
2149 0xe28cca00, /* add ip, ip, #0xNN000 */
2150 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
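/* A minimal sketch of how the #0xNN... fields above can be filled in at
   link time (GOT_DISPLACEMENT and PTR are placeholder names for this
   example, not a copy of the real routine): with got_displacement being
   the distance from this PLT entry + 8 to the symbol's GOT slot,

     put_arm_insn (htab, output_bfd,
                   elf32_arm_plt_entry[0]
                   | ((got_displacement & 0x0ff00000) >> 20), ptr);
     put_arm_insn (htab, output_bfd,
                   elf32_arm_plt_entry[1]
                   | ((got_displacement & 0x000ff000) >> 12), ptr + 4);
     put_arm_insn (htab, output_bfd,
                   elf32_arm_plt_entry[2]
                   | (got_displacement & 0x00000fff), ptr + 8);

   splits the displacement across the two adds and the ldr's offset.  */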
2155 /* The format of the first entry in the procedure linkage table
2156 for a VxWorks executable. */
2157 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2159 0xe52dc008, /* str ip,[sp,#-8]! */
2160 0xe59fc000, /* ldr ip,[pc] */
2161 0xe59cf008, /* ldr pc,[ip,#8] */
2162 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2165 /* The format of subsequent entries in a VxWorks executable. */
2166 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2168 0xe59fc000, /* ldr ip,[pc] */
2169 0xe59cf000, /* ldr pc,[ip] */
2170 0x00000000, /* .long @got */
2171 0xe59fc000, /* ldr ip,[pc] */
2172 0xea000000, /* b _PLT */
2173 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2176 /* The format of entries in a VxWorks shared library. */
2177 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2179 0xe59fc000, /* ldr ip,[pc] */
2180 0xe79cf009, /* ldr pc,[ip,r9] */
2181 0x00000000, /* .long @got */
2182 0xe59fc000, /* ldr ip,[pc] */
2183 0xe599f008, /* ldr pc,[r9,#8] */
2184 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2187 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2188 #define PLT_THUMB_STUB_SIZE 4
2189 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2195 /* The entries in a PLT when using a DLL-based target with multiple
2197 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2199 0xe51ff004, /* ldr pc, [pc, #-4] */
2200 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2203 /* The first entry in a procedure linkage table looks like
2204 this. It is set up so that any shared library function that is
2205 called before the relocation has been set up calls the dynamic
2207 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2210 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2211 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2212 0xe08cc00f, /* add ip, ip, pc */
2213 0xe52dc008, /* str ip, [sp, #-8]! */
2214 /* Second bundle: */
2215 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2216 0xe59cc000, /* ldr ip, [ip] */
2217 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2218 0xe12fff1c, /* bx ip */
2220 0xe320f000, /* nop */
2221 0xe320f000, /* nop */
2222 0xe320f000, /* nop */
2224 0xe50dc004, /* str ip, [sp, #-4] */
2225 /* Fourth bundle: */
2226 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2227 0xe59cc000, /* ldr ip, [ip] */
2228 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2229 0xe12fff1c, /* bx ip */
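/* For background (a reading of the masks above, not new mechanism):
   under the Native Client ARM sandbox, indirect branch targets must be
   bundle-aligned and confined to the sandbox, so "bic ip, ip, #0xc000000f"
   clears the low four bits (16-byte bundle alignment) and the top two
   address bits before each "bx ip", while the data-side
   "bic ip, ip, #0xc0000000" clears only the top bits before the load.  */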
2231 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2233 /* Subsequent entries in a procedure linkage table look like this. */
2234 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2236 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2237 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2238 0xe08cc00f, /* add ip, ip, pc */
2239 0xea000000, /* b .Lplt_tail */
2242 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2243 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2244 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2245 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2246 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2247 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
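/* Worked numbers for the limits above (arithmetic only): an ARM B/BL
   has a signed 24-bit immediate scaled by 4 and is PC+8 relative, so
   the forward reach is ((1 << 23) - 1) * 4 + 8 = 33554436 bytes and the
   backward reach is -(1 << 23) * 4 + 8 = -33554424 bytes, i.e. roughly
   +/-32MB.  The Thumb-1 BL pair reaches about +/-4MB and Thumb-2
   B.W/BL about +/-16MB, as reflected in the THM/THM2 values.  */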
2257 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2258 /* A bit of a hack: a Thumb conditional branch in which the proper
2259 condition is inserted by arm_build_one_stub(). */
2260 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2261 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2262 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2263 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2264 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2265 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2270 enum stub_insn_type type;
2271 unsigned int r_type;
2275 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2276 to reach the stub if necessary. */
2277 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2279 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2280 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2283 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2285 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2287 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2288 ARM_INSN (0xe12fff1c), /* bx ip */
2289 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2292 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2293 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2295 THUMB16_INSN (0xb401), /* push {r0} */
2296 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2297 THUMB16_INSN (0x4684), /* mov ip, r0 */
2298 THUMB16_INSN (0xbc01), /* pop {r0} */
2299 THUMB16_INSN (0x4760), /* bx ip */
2300 THUMB16_INSN (0xbf00), /* nop */
2301 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2304 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2306 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2308 THUMB16_INSN (0x4778), /* bx pc */
2309 THUMB16_INSN (0x46c0), /* nop */
2310 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2311 ARM_INSN (0xe12fff1c), /* bx ip */
2312 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2315 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2317 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2319 THUMB16_INSN (0x4778), /* bx pc */
2320 THUMB16_INSN (0x46c0), /* nop */
2321 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2322 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2325 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2326 one, when the destination is close enough. */
2327 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2329 THUMB16_INSN (0x4778), /* bx pc */
2330 THUMB16_INSN (0x46c0), /* nop */
2331 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2334 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2335 blx to reach the stub if necessary. */
2336 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2338 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2339 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2340 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2343 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2344 blx to reach the stub if necessary. We cannot add into pc;
2345 it is not guaranteed to mode switch (different in ARMv6 and
2347 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2349 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2350 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2351 ARM_INSN (0xe12fff1c), /* bx ip */
2352 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2355 /* V4T ARM -> Thumb long branch stub, PIC. */
2356 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2358 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2359 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2360 ARM_INSN (0xe12fff1c), /* bx ip */
2361 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2364 /* V4T Thumb -> ARM long branch stub, PIC. */
2365 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2367 THUMB16_INSN (0x4778), /* bx pc */
2368 THUMB16_INSN (0x46c0), /* nop */
2369 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2370 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2371 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2374 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2376 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2378 THUMB16_INSN (0xb401), /* push {r0} */
2379 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2380 THUMB16_INSN (0x46fc), /* mov ip, pc */
2381 THUMB16_INSN (0x4484), /* add ip, r0 */
2382 THUMB16_INSN (0xbc01), /* pop {r0} */
2383 THUMB16_INSN (0x4760), /* bx ip */
2384 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2387 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2389 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2391 THUMB16_INSN (0x4778), /* bx pc */
2392 THUMB16_INSN (0x46c0), /* nop */
2393 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2394 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2395 ARM_INSN (0xe12fff1c), /* bx ip */
2396 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2399 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2400 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2401 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2403 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2404 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2405 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2408 /* V4T Thumb -> TLS trampoline. Lowest common denominator, which is a
2409 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2410 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2412 THUMB16_INSN (0x4778), /* bx pc */
2413 THUMB16_INSN (0x46c0), /* nop */
2414 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2415 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2416 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2419 /* Cortex-A8 erratum-workaround stubs. */
2421 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2422 can't use a conditional branch to reach this stub). */
2424 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2426 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2427 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2428 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2431 /* Stub used for b.w and bl.w instructions. */
2433 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2435 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2438 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2440 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2443 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2444 instruction (which switches to ARM mode) to point to this stub. Jump to the
2445 real destination using an ARM-mode branch. */
2447 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2449 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2452 /* For each section group there can be a specially created linker section
2453 to hold the stubs for that group. The name of the stub section is based
2454 upon the name of another section within that group with the suffix below
2457 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2458 create what appeared to be a linker stub section when it actually
2459 contained user code/data. For example, consider this fragment:
2461 const char * stubborn_problems[] = { "np" };
2463 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2466 .data.rel.local.stubborn_problems
2468 This then causes problems in elf32_arm_build_stubs() as it triggers:
2470 // Ignore non-stub sections.
2471 if (!strstr (stub_sec->name, STUB_SUFFIX))
2474 And so the section would be ignored instead of being processed. Hence
2475 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2477 #define STUB_SUFFIX ".__stub"
2479 /* One entry per long/short branch stub defined above. */
2481 DEF_STUB(long_branch_any_any) \
2482 DEF_STUB(long_branch_v4t_arm_thumb) \
2483 DEF_STUB(long_branch_thumb_only) \
2484 DEF_STUB(long_branch_v4t_thumb_thumb) \
2485 DEF_STUB(long_branch_v4t_thumb_arm) \
2486 DEF_STUB(short_branch_v4t_thumb_arm) \
2487 DEF_STUB(long_branch_any_arm_pic) \
2488 DEF_STUB(long_branch_any_thumb_pic) \
2489 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2490 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2491 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2492 DEF_STUB(long_branch_thumb_only_pic) \
2493 DEF_STUB(long_branch_any_tls_pic) \
2494 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2495 DEF_STUB(a8_veneer_b_cond) \
2496 DEF_STUB(a8_veneer_b) \
2497 DEF_STUB(a8_veneer_bl) \
2498 DEF_STUB(a8_veneer_blx)
2500 #define DEF_STUB(x) arm_stub_##x,
2501 enum elf32_arm_stub_type
2505 /* Note the first a8_veneer type */
2506 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2512 const insn_sequence* template_sequence;
2516 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2517 static const stub_def stub_definitions[] =
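/* Illustrative expansion of the X-macro pattern above (a sketch, not a
   verbatim copy of the expanded code): each DEF_STUB(x) in DEF_STUBS is
   expanded twice, once as an enumerator and once as a template/size
   pair, e.g. for long_branch_any_any:

     enum elf32_arm_stub_type { ..., arm_stub_long_branch_any_any, ... };

     static const stub_def stub_definitions[] =
       { ..., { elf32_arm_stub_long_branch_any_any,
                ARRAY_SIZE (elf32_arm_stub_long_branch_any_any) }, ... };

   so a stub-type enum value can be used to look up the matching
   insn_sequence template and the number of entries in it.  */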
2523 struct elf32_arm_stub_hash_entry
2525 /* Base hash table entry structure. */
2526 struct bfd_hash_entry root;
2528 /* The stub section. */
2531 /* Offset within stub_sec of the beginning of this stub. */
2532 bfd_vma stub_offset;
2534 /* Given the symbol's value and its section we can determine its final
2535 value when building the stubs (so the stub knows where to jump). */
2536 bfd_vma target_value;
2537 asection *target_section;
2539 /* Offset to apply to relocation referencing target_value. */
2540 bfd_vma target_addend;
2542 /* The instruction which caused this stub to be generated (only valid for
2543 Cortex-A8 erratum workaround stubs at present). */
2544 unsigned long orig_insn;
2546 /* The stub type. */
2547 enum elf32_arm_stub_type stub_type;
2548 /* Its encoding size in bytes. */
2551 const insn_sequence *stub_template;
2552 /* The size of the template (number of entries). */
2553 int stub_template_size;
2555 /* The symbol table entry, if any, that this was derived from. */
2556 struct elf32_arm_link_hash_entry *h;
2558 /* Type of branch. */
2559 enum arm_st_branch_type branch_type;
2561 /* Where this stub is being called from, or, in the case of combined
2562 stub sections, the first input section in the group. */
2565 /* The name for the local symbol at the start of this stub. The
2566 stub name in the hash table has to be unique; this does not, so
2567 it can be friendlier. */
2571 /* Used to build a map of a section. This is required for mixed-endian
2574 typedef struct elf32_elf_section_map
2579 elf32_arm_section_map;
2581 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2585 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2586 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2587 VFP11_ERRATUM_ARM_VENEER,
2588 VFP11_ERRATUM_THUMB_VENEER
2590 elf32_vfp11_erratum_type;
2592 typedef struct elf32_vfp11_erratum_list
2594 struct elf32_vfp11_erratum_list *next;
2600 struct elf32_vfp11_erratum_list *veneer;
2601 unsigned int vfp_insn;
2605 struct elf32_vfp11_erratum_list *branch;
2609 elf32_vfp11_erratum_type type;
2611 elf32_vfp11_erratum_list;
2616 INSERT_EXIDX_CANTUNWIND_AT_END
2618 arm_unwind_edit_type;
2620 /* A (sorted) list of edits to apply to an unwind table. */
2621 typedef struct arm_unwind_table_edit
2623 arm_unwind_edit_type type;
2624 /* Note: we sometimes want to insert an unwind entry corresponding to a
2625 section different from the one we're currently writing out, so record the
2626 (text) section this edit relates to here. */
2627 asection *linked_section;
2629 struct arm_unwind_table_edit *next;
2631 arm_unwind_table_edit;
2633 typedef struct _arm_elf_section_data
2635 /* Information about mapping symbols. */
2636 struct bfd_elf_section_data elf;
2637 unsigned int mapcount;
2638 unsigned int mapsize;
2639 elf32_arm_section_map *map;
2640 /* Information about CPU errata. */
2641 unsigned int erratumcount;
2642 elf32_vfp11_erratum_list *erratumlist;
2643 /* Information about unwind tables. */
2646 /* Unwind info attached to a text section. */
2649 asection *arm_exidx_sec;
2652 /* Unwind info attached to an .ARM.exidx section. */
2655 arm_unwind_table_edit *unwind_edit_list;
2656 arm_unwind_table_edit *unwind_edit_tail;
2660 _arm_elf_section_data;
2662 #define elf32_arm_section_data(sec) \
2663 ((_arm_elf_section_data *) elf_section_data (sec))
2665 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2666 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2667 so may be created multiple times: we use an array of these entries whilst
2668 relaxing, which we can refresh easily, then create stubs for each potentially
2669 erratum-triggering instruction once we've settled on a solution. */
2671 struct a8_erratum_fix
2677 unsigned long orig_insn;
2679 enum elf32_arm_stub_type stub_type;
2680 enum arm_st_branch_type branch_type;
2683 /* A table of relocs applied to branches which might trigger Cortex-A8
2686 struct a8_erratum_reloc
2689 bfd_vma destination;
2690 struct elf32_arm_link_hash_entry *hash;
2691 const char *sym_name;
2692 unsigned int r_type;
2693 enum arm_st_branch_type branch_type;
2694 bfd_boolean non_a8_stub;
2697 /* The size of the thread control block. */
2700 /* ARM-specific information about a PLT entry, over and above the usual
2704 /* We reference count Thumb references to a PLT entry separately,
2705 so that we can emit the Thumb trampoline only if needed. */
2706 bfd_signed_vma thumb_refcount;
2708 /* Some references from Thumb code may be eliminated by BL->BLX
2709 conversion, so record them separately. */
2710 bfd_signed_vma maybe_thumb_refcount;
2712 /* How many of the recorded PLT accesses were from non-call relocations.
2713 This information is useful when deciding whether anything takes the
2714 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2715 non-call references to the function should resolve directly to the
2716 real runtime target. */
2717 unsigned int noncall_refcount;
2719 /* Since PLT entries have variable size if the Thumb prologue is
2720 used, we need to record the index into .got.plt instead of
2721 recomputing it from the PLT offset. */
2722 bfd_signed_vma got_offset;
2725 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2726 struct arm_local_iplt_info
2728 /* The information that is usually found in the generic ELF part of
2729 the hash table entry. */
2730 union gotplt_union root;
2732 /* The information that is usually found in the ARM-specific part of
2733 the hash table entry. */
2734 struct arm_plt_info arm;
2736 /* A list of all potential dynamic relocations against this symbol. */
2737 struct elf_dyn_relocs *dyn_relocs;
2740 struct elf_arm_obj_tdata
2742 struct elf_obj_tdata root;
2744 /* tls_type for each local got entry. */
2745 char *local_got_tls_type;
2747 /* GOTPLT entries for TLS descriptors. */
2748 bfd_vma *local_tlsdesc_gotent;
2750 /* Information for local symbols that need entries in .iplt. */
2751 struct arm_local_iplt_info **local_iplt;
2753 /* Zero to warn when linking objects with incompatible enum sizes. */
2754 int no_enum_size_warning;
2756 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2757 int no_wchar_size_warning;
2760 #define elf_arm_tdata(bfd) \
2761 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2763 #define elf32_arm_local_got_tls_type(bfd) \
2764 (elf_arm_tdata (bfd)->local_got_tls_type)
2766 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2767 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2769 #define elf32_arm_local_iplt(bfd) \
2770 (elf_arm_tdata (bfd)->local_iplt)
2772 #define is_arm_elf(bfd) \
2773 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2774 && elf_tdata (bfd) != NULL \
2775 && elf_object_id (bfd) == ARM_ELF_DATA)
2778 elf32_arm_mkobject (bfd *abfd)
2780 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2784 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2786 /* Arm ELF linker hash entry. */
2787 struct elf32_arm_link_hash_entry
2789 struct elf_link_hash_entry root;
2791 /* Track dynamic relocs copied for this symbol. */
2792 struct elf_dyn_relocs *dyn_relocs;
2794 /* ARM-specific PLT information. */
2795 struct arm_plt_info plt;
2797 #define GOT_UNKNOWN 0
2798 #define GOT_NORMAL 1
2799 #define GOT_TLS_GD 2
2800 #define GOT_TLS_IE 4
2801 #define GOT_TLS_GDESC 8
2802 #define GOT_TLS_GD_ANY_P(type) (((type) & GOT_TLS_GD) || ((type) & GOT_TLS_GDESC))
2803 unsigned int tls_type : 8;
2805 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2806 unsigned int is_iplt : 1;
2808 unsigned int unused : 23;
2810 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2811 starting at the end of the jump table. */
2812 bfd_vma tlsdesc_got;
2814 /* The symbol marking the real symbol location for exported thumb
2815 symbols with Arm stubs. */
2816 struct elf_link_hash_entry *export_glue;
2818 /* A pointer to the most recently used stub hash entry against this
2820 struct elf32_arm_stub_hash_entry *stub_cache;
2823 /* Traverse an arm ELF linker hash table. */
2824 #define elf32_arm_link_hash_traverse(table, func, info) \
2825 (elf_link_hash_traverse \
2827 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2830 /* Get the ARM elf linker hash table from a link_info structure. */
2831 #define elf32_arm_hash_table(info) \
2832 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2833 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2835 #define arm_stub_hash_lookup(table, string, create, copy) \
2836 ((struct elf32_arm_stub_hash_entry *) \
2837 bfd_hash_lookup ((table), (string), (create), (copy)))
2839 /* Array to keep track of which stub sections have been created, and
2840 information on stub grouping. */
2843 /* This is the section to which stubs in the group will be
2846 /* The stub section. */
2850 #define elf32_arm_compute_jump_table_size(htab) \
2851 ((htab)->next_tls_desc_index * 4)
2853 /* ARM ELF linker hash table. */
2854 struct elf32_arm_link_hash_table
2856 /* The main hash table. */
2857 struct elf_link_hash_table root;
2859 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2860 bfd_size_type thumb_glue_size;
2862 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2863 bfd_size_type arm_glue_size;
2865 /* The size in bytes of section containing the ARMv4 BX veneers. */
2866 bfd_size_type bx_glue_size;
2868 /* Offsets of ARMv4 BX veneers. Bit 1 is set if the veneer is present,
2869 and bit 0 is set once the veneer has been populated. */
2870 bfd_vma bx_glue_offset[15];
2872 /* The size in bytes of the section containing glue for VFP11 erratum
2874 bfd_size_type vfp11_erratum_glue_size;
2876 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2877 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2878 elf32_arm_write_section(). */
2879 struct a8_erratum_fix *a8_erratum_fixes;
2880 unsigned int num_a8_erratum_fixes;
2882 /* An arbitrary input BFD chosen to hold the glue sections. */
2883 bfd * bfd_of_glue_owner;
2885 /* Nonzero to output a BE8 image. */
2888 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2889 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2892 /* The relocation to use for R_ARM_TARGET2 relocations. */
2895 /* 0 = Ignore R_ARM_V4BX.
2896 1 = Convert BX to MOV PC.
2897 2 = Generate v4 interworking stubs. */
2900 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2903 /* Whether we should fix the ARM1176 BLX immediate issue. */
2906 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2909 /* What sort of code sequences we should look for which may trigger the
2910 VFP11 denorm erratum. */
2911 bfd_arm_vfp11_fix vfp11_fix;
2913 /* Global counter for the number of fixes we have emitted. */
2914 int num_vfp11_fixes;
2916 /* Nonzero to force PIC branch veneers. */
2919 /* The number of bytes in the initial entry in the PLT. */
2920 bfd_size_type plt_header_size;
2922 /* The number of bytes in the subsequent PLT entries. */
2923 bfd_size_type plt_entry_size;
2925 /* True if the target system is VxWorks. */
2928 /* True if the target system is Symbian OS. */
2931 /* True if the target system is Native Client. */
2934 /* True if the target uses REL relocations. */
2937 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
2938 bfd_vma next_tls_desc_index;
2940 /* How many R_ARM_TLS_DESC relocations were generated so far. */
2941 bfd_vma num_tls_desc;
2943 /* Short-cuts to get to dynamic linker sections. */
2947 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2950 /* The offset into splt of the PLT entry for the TLS descriptor
2951 resolver. Special values are 0, if not necessary (or not found
2952 to be necessary yet), and -1 if needed but not determined
2954 bfd_vma dt_tlsdesc_plt;
2956 /* The offset into sgot of the GOT entry used by the PLT entry
2958 bfd_vma dt_tlsdesc_got;
2960 /* Offset in .plt section of tls_arm_trampoline. */
2961 bfd_vma tls_trampoline;
2963 /* Data for R_ARM_TLS_LDM32 relocations. */
2966 bfd_signed_vma refcount;
2970 /* Small local sym cache. */
2971 struct sym_cache sym_cache;
2973 /* For convenience in allocate_dynrelocs. */
2976 /* The amount of space used by the reserved portion of the sgotplt
2977 section, plus whatever space is used by the jump slots. */
2978 bfd_vma sgotplt_jump_table_size;
2980 /* The stub hash table. */
2981 struct bfd_hash_table stub_hash_table;
2983 /* Linker stub bfd. */
2986 /* Linker call-backs. */
2987 asection * (*add_stub_section) (const char *, asection *);
2988 void (*layout_sections_again) (void);
2990 /* Array to keep track of which stub sections have been created, and
2991 information on stub grouping. */
2992 struct map_stub *stub_group;
2994 /* Number of elements in stub_group. */
2997 /* Assorted information used by elf32_arm_size_stubs. */
2998 unsigned int bfd_count;
3000 asection **input_list;
3003 /* Create an entry in an ARM ELF linker hash table. */
3005 static struct bfd_hash_entry *
3006 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3007 struct bfd_hash_table * table,
3008 const char * string)
3010 struct elf32_arm_link_hash_entry * ret =
3011 (struct elf32_arm_link_hash_entry *) entry;
3013 /* Allocate the structure if it has not already been allocated by a
3016 ret = (struct elf32_arm_link_hash_entry *)
3017 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3019 return (struct bfd_hash_entry *) ret;
3021 /* Call the allocation method of the superclass. */
3022 ret = ((struct elf32_arm_link_hash_entry *)
3023 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3027 ret->dyn_relocs = NULL;
3028 ret->tls_type = GOT_UNKNOWN;
3029 ret->tlsdesc_got = (bfd_vma) -1;
3030 ret->plt.thumb_refcount = 0;
3031 ret->plt.maybe_thumb_refcount = 0;
3032 ret->plt.noncall_refcount = 0;
3033 ret->plt.got_offset = -1;
3034 ret->is_iplt = FALSE;
3035 ret->export_glue = NULL;
3037 ret->stub_cache = NULL;
3040 return (struct bfd_hash_entry *) ret;
3043 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3047 elf32_arm_allocate_local_sym_info (bfd *abfd)
3049 if (elf_local_got_refcounts (abfd) == NULL)
3051 bfd_size_type num_syms;
3055 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3056 size = num_syms * (sizeof (bfd_signed_vma)
3057 + sizeof (struct arm_local_iplt_info *)
3060 data = bfd_zalloc (abfd, size);
3064 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3065 data += num_syms * sizeof (bfd_signed_vma);
3067 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3068 data += num_syms * sizeof (struct arm_local_iplt_info *);
3070 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3071 data += num_syms * sizeof (bfd_vma);
3073 elf32_arm_local_got_tls_type (abfd) = data;
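/* For reference (a reading of the assignments above, not new
   behaviour): the single zeroed allocation is carved into four
   per-symbol arrays, in this order: GOT refcounts (bfd_signed_vma),
   .iplt information pointers (struct arm_local_iplt_info *), TLS
   descriptor GOT offsets (bfd_vma), and finally one tls_type byte per
   symbol, with the symbol count taken from symtab_hdr.sh_info.  */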
3078 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3079 to input bfd ABFD. Create the information if it doesn't already exist.
3080 Return null if an allocation fails. */
3082 static struct arm_local_iplt_info *
3083 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3085 struct arm_local_iplt_info **ptr;
3087 if (!elf32_arm_allocate_local_sym_info (abfd))
3090 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3091 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3093 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3097 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3098 in ABFD's symbol table. If the symbol is global, H points to its
3099 hash table entry, otherwise H is null.
3101 Return true if the symbol does have PLT information. When returning
3102 true, point *ROOT_PLT at the target-independent reference count/offset
3103 union and *ARM_PLT at the ARM-specific information. */
3106 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3107 unsigned long r_symndx, union gotplt_union **root_plt,
3108 struct arm_plt_info **arm_plt)
3110 struct arm_local_iplt_info *local_iplt;
3114 *root_plt = &h->root.plt;
3119 if (elf32_arm_local_iplt (abfd) == NULL)
3122 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3123 if (local_iplt == NULL)
3126 *root_plt = &local_iplt->root;
3127 *arm_plt = &local_iplt->arm;
3131 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3135 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3136 struct arm_plt_info *arm_plt)
3138 struct elf32_arm_link_hash_table *htab;
3140 htab = elf32_arm_hash_table (info);
3141 return (arm_plt->thumb_refcount != 0
3142 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3145 /* Return a pointer to the head of the dynamic reloc list that should
3146 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3147 ABFD's symbol table. Return null if an error occurs. */
3149 static struct elf_dyn_relocs **
3150 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3151 Elf_Internal_Sym *isym)
3153 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3155 struct arm_local_iplt_info *local_iplt;
3157 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3158 if (local_iplt == NULL)
3160 return &local_iplt->dyn_relocs;
3164 /* Track dynamic relocs needed for local syms too.
3165 We really need local syms available to do this
3170 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3174 vpp = &elf_section_data (s)->local_dynrel;
3175 return (struct elf_dyn_relocs **) vpp;
3179 /* Initialize an entry in the stub hash table. */
3181 static struct bfd_hash_entry *
3182 stub_hash_newfunc (struct bfd_hash_entry *entry,
3183 struct bfd_hash_table *table,
3186 /* Allocate the structure if it has not already been allocated by a
3190 entry = (struct bfd_hash_entry *)
3191 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3196 /* Call the allocation method of the superclass. */
3197 entry = bfd_hash_newfunc (entry, table, string);
3200 struct elf32_arm_stub_hash_entry *eh;
3202 /* Initialize the local fields. */
3203 eh = (struct elf32_arm_stub_hash_entry *) entry;
3204 eh->stub_sec = NULL;
3205 eh->stub_offset = 0;
3206 eh->target_value = 0;
3207 eh->target_section = NULL;
3208 eh->target_addend = 0;
3210 eh->stub_type = arm_stub_none;
3212 eh->stub_template = NULL;
3213 eh->stub_template_size = 0;
3216 eh->output_name = NULL;
3222 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3223 shortcuts to them in our hash table. */
3226 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3228 struct elf32_arm_link_hash_table *htab;
3230 htab = elf32_arm_hash_table (info);
3234 /* BPABI objects never have a GOT, or associated sections. */
3235 if (htab->symbian_p)
3238 if (! _bfd_elf_create_got_section (dynobj, info))
3244 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3247 create_ifunc_sections (struct bfd_link_info *info)
3249 struct elf32_arm_link_hash_table *htab;
3250 const struct elf_backend_data *bed;
3255 htab = elf32_arm_hash_table (info);
3256 dynobj = htab->root.dynobj;
3257 bed = get_elf_backend_data (dynobj);
3258 flags = bed->dynamic_sec_flags;
3260 if (htab->root.iplt == NULL)
3262 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3263 flags | SEC_READONLY | SEC_CODE);
3265 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3267 htab->root.iplt = s;
3270 if (htab->root.irelplt == NULL)
3272 s = bfd_make_section_anyway_with_flags (dynobj,
3273 RELOC_SECTION (htab, ".iplt"),
3274 flags | SEC_READONLY);
3276 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3278 htab->root.irelplt = s;
3281 if (htab->root.igotplt == NULL)
3283 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3285 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3287 htab->root.igotplt = s;
3292 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3293 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3297 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3299 struct elf32_arm_link_hash_table *htab;
3301 htab = elf32_arm_hash_table (info);
3305 if (!htab->root.sgot && !create_got_section (dynobj, info))
3308 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3311 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3313 htab->srelbss = bfd_get_linker_section (dynobj,
3314 RELOC_SECTION (htab, ".bss"));
3316 if (htab->vxworks_p)
3318 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3323 htab->plt_header_size = 0;
3324 htab->plt_entry_size
3325 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3329 htab->plt_header_size
3330 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3331 htab->plt_entry_size
3332 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3336 if (!htab->root.splt
3337 || !htab->root.srelplt
3339 || (!info->shared && !htab->srelbss))
3345 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3348 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3349 struct elf_link_hash_entry *dir,
3350 struct elf_link_hash_entry *ind)
3352 struct elf32_arm_link_hash_entry *edir, *eind;
3354 edir = (struct elf32_arm_link_hash_entry *) dir;
3355 eind = (struct elf32_arm_link_hash_entry *) ind;
3357 if (eind->dyn_relocs != NULL)
3359 if (edir->dyn_relocs != NULL)
3361 struct elf_dyn_relocs **pp;
3362 struct elf_dyn_relocs *p;
3364 /* Add reloc counts against the indirect sym to the direct sym
3365 list. Merge any entries against the same section. */
3366 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3368 struct elf_dyn_relocs *q;
3370 for (q = edir->dyn_relocs; q != NULL; q = q->next)
3371 if (q->sec == p->sec)
3373 q->pc_count += p->pc_count;
3374 q->count += p->count;
3381 *pp = edir->dyn_relocs;
3384 edir->dyn_relocs = eind->dyn_relocs;
3385 eind->dyn_relocs = NULL;
3388 if (ind->root.type == bfd_link_hash_indirect)
3390 /* Copy over PLT info. */
3391 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
3392 eind->plt.thumb_refcount = 0;
3393 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
3394 eind->plt.maybe_thumb_refcount = 0;
3395 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
3396 eind->plt.noncall_refcount = 0;
3398 /* We should only allocate a function to .iplt once the final
3399 symbol information is known. */
3400 BFD_ASSERT (!eind->is_iplt);
3402 if (dir->got.refcount <= 0)
3404 edir->tls_type = eind->tls_type;
3405 eind->tls_type = GOT_UNKNOWN;
3409 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
3412 /* Create an ARM elf linker hash table. */
3414 static struct bfd_link_hash_table *
3415 elf32_arm_link_hash_table_create (bfd *abfd)
3417 struct elf32_arm_link_hash_table *ret;
3418 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3420 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
3424 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3425 elf32_arm_link_hash_newfunc,
3426 sizeof (struct elf32_arm_link_hash_entry),
3433 ret->sdynbss = NULL;
3434 ret->srelbss = NULL;
3435 ret->srelplt2 = NULL;
3436 ret->dt_tlsdesc_plt = 0;
3437 ret->dt_tlsdesc_got = 0;
3438 ret->tls_trampoline = 0;
3439 ret->next_tls_desc_index = 0;
3440 ret->num_tls_desc = 0;
3441 ret->thumb_glue_size = 0;
3442 ret->arm_glue_size = 0;
3443 ret->bx_glue_size = 0;
3444 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
3445 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3446 ret->vfp11_erratum_glue_size = 0;
3447 ret->num_vfp11_fixes = 0;
3448 ret->fix_cortex_a8 = 0;
3449 ret->fix_arm1176 = 0;
3450 ret->bfd_of_glue_owner = NULL;
3451 ret->byteswap_code = 0;
3452 ret->target1_is_rel = 0;
3453 ret->target2_reloc = R_ARM_NONE;
3454 #ifdef FOUR_WORD_PLT
3455 ret->plt_header_size = 16;
3456 ret->plt_entry_size = 16;
3458 ret->plt_header_size = 20;
3459 ret->plt_entry_size = 12;
3467 ret->sym_cache.abfd = NULL;
3469 ret->tls_ldm_got.refcount = 0;
3470 ret->stub_bfd = NULL;
3471 ret->add_stub_section = NULL;
3472 ret->layout_sections_again = NULL;
3473 ret->stub_group = NULL;
3477 ret->input_list = NULL;
3479 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3480 sizeof (struct elf32_arm_stub_hash_entry)))
3486 return &ret->root.root;
3489 /* Free the derived linker hash table. */
3492 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
3494 struct elf32_arm_link_hash_table *ret
3495 = (struct elf32_arm_link_hash_table *) hash;
3497 bfd_hash_table_free (&ret->stub_hash_table);
3498 _bfd_generic_link_hash_table_free (hash);
3501 /* Determine if we're dealing with a Thumb-only architecture. */
3504 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3506 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3510 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
3513 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
3516 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3517 Tag_CPU_arch_profile);
3519 return profile == 'M';
3522 /* Determine if we're dealing with a Thumb-2 object. */
3525 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3527 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3529 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3532 /* Determine what kind of NOPs are available. */
3535 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3537 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3539 return arch == TAG_CPU_ARCH_V6T2
3540 || arch == TAG_CPU_ARCH_V6K
3541 || arch == TAG_CPU_ARCH_V7
3542 || arch == TAG_CPU_ARCH_V7E_M;
3546 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3548 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3550 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3551 || arch == TAG_CPU_ARCH_V7E_M);
3555 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3559 case arm_stub_long_branch_thumb_only:
3560 case arm_stub_long_branch_v4t_thumb_arm:
3561 case arm_stub_short_branch_v4t_thumb_arm:
3562 case arm_stub_long_branch_v4t_thumb_arm_pic:
3563 case arm_stub_long_branch_v4t_thumb_tls_pic:
3564 case arm_stub_long_branch_thumb_only_pic:
3575 /* Determine the type of stub needed, if any, for a call. */
3577 static enum elf32_arm_stub_type
3578 arm_type_of_stub (struct bfd_link_info *info,
3579 asection *input_sec,
3580 const Elf_Internal_Rela *rel,
3581 unsigned char st_type,
3582 enum arm_st_branch_type *actual_branch_type,
3583 struct elf32_arm_link_hash_entry *hash,
3584 bfd_vma destination,
3590 bfd_signed_vma branch_offset;
3591 unsigned int r_type;
3592 struct elf32_arm_link_hash_table * globals;
3595 enum elf32_arm_stub_type stub_type = arm_stub_none;
3597 enum arm_st_branch_type branch_type = *actual_branch_type;
3598 union gotplt_union *root_plt;
3599 struct arm_plt_info *arm_plt;
3601 if (branch_type == ST_BRANCH_LONG)
3604 globals = elf32_arm_hash_table (info);
3605 if (globals == NULL)
3608 thumb_only = using_thumb_only (globals);
3610 thumb2 = using_thumb2 (globals);
3612 /* Determine where the call point is. */
3613 location = (input_sec->output_offset
3614 + input_sec->output_section->vma
3617 r_type = ELF32_R_TYPE (rel->r_info);
3619 /* For TLS call relocs, it is the caller's responsibility to provide
3620 the address of the appropriate trampoline. */
3621 if (r_type != R_ARM_TLS_CALL
3622 && r_type != R_ARM_THM_TLS_CALL
3623 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3624 &root_plt, &arm_plt)
3625 && root_plt->offset != (bfd_vma) -1)
3629 if (hash == NULL || hash->is_iplt)
3630 splt = globals->root.iplt;
3632 splt = globals->root.splt;
3637 /* Note when dealing with PLT entries: the main PLT stub is in
3638 ARM mode, so if the branch is in Thumb mode, another
3639 Thumb->ARM stub will be inserted later just before the ARM
3640 PLT stub. We don't take this extra distance into account
3641 here, because if a long branch stub is needed, we'll add a
3642 Thumb->Arm one and branch directly to the ARM PLT entry
3643 because it avoids spreading offset corrections in several
3646 destination = (splt->output_section->vma
3647 + splt->output_offset
3648 + root_plt->offset);
3650 branch_type = ST_BRANCH_TO_ARM;
3653 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3654 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3656 branch_offset = (bfd_signed_vma)(destination - location);
3658 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3659 || r_type == R_ARM_THM_TLS_CALL)
3661 /* Handle cases where:
3662 - this call goes too far (different Thumb/Thumb2 max
3664 - it's a Thumb->Arm call and blx is not available, or it's a
3665 Thumb->Arm branch (not bl). A stub is needed in this case,
3666 but only if this call is not through a PLT entry. Indeed,
3667 PLT stubs handle mode switching already.
3670 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3671 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3673 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3674 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3675 || (branch_type == ST_BRANCH_TO_ARM
3676 && (((r_type == R_ARM_THM_CALL
3677 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3678 || (r_type == R_ARM_THM_JUMP24))
3681 if (branch_type == ST_BRANCH_TO_THUMB)
3683 /* Thumb to thumb. */
3686 stub_type = (info->shared | globals->pic_veneer)
3688 ? ((globals->use_blx
3689 && (r_type == R_ARM_THM_CALL))
3690 /* V5T and above. Stub starts with ARM code, so
3691 we must be able to switch mode before
3692 reaching it, which is only possible for 'bl'
3693 (ie R_ARM_THM_CALL relocation). */
3694 ? arm_stub_long_branch_any_thumb_pic
3695 /* On V4T, use Thumb code only. */
3696 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3698 /* non-PIC stubs. */
3699 : ((globals->use_blx
3700 && (r_type == R_ARM_THM_CALL))
3701 /* V5T and above. */
3702 ? arm_stub_long_branch_any_any
3704 : arm_stub_long_branch_v4t_thumb_thumb);
3708 stub_type = (info->shared | globals->pic_veneer)
3710 ? arm_stub_long_branch_thumb_only_pic
3712 : arm_stub_long_branch_thumb_only;
3719 && sym_sec->owner != NULL
3720 && !INTERWORK_FLAG (sym_sec->owner))
3722 (*_bfd_error_handler)
3723 (_("%B(%s): warning: interworking not enabled.\n"
3724 " first occurrence: %B: Thumb call to ARM"),
3725 sym_sec->owner, input_bfd, name);
3729 (info->shared | globals->pic_veneer)
3731 ? (r_type == R_ARM_THM_TLS_CALL
3733 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3734 : arm_stub_long_branch_v4t_thumb_tls_pic)
3735 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3736 /* V5T PIC and above. */
3737 ? arm_stub_long_branch_any_arm_pic
3739 : arm_stub_long_branch_v4t_thumb_arm_pic))
3741 /* non-PIC stubs. */
3742 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3743 /* V5T and above. */
3744 ? arm_stub_long_branch_any_any
3746 : arm_stub_long_branch_v4t_thumb_arm);
3748 /* Handle v4t short branches. */
3749 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3750 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3751 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3752 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3756 else if (r_type == R_ARM_CALL
3757 || r_type == R_ARM_JUMP24
3758 || r_type == R_ARM_PLT32
3759 || r_type == R_ARM_TLS_CALL)
3761 if (branch_type == ST_BRANCH_TO_THUMB)
3766 && sym_sec->owner != NULL
3767 && !INTERWORK_FLAG (sym_sec->owner))
3769 (*_bfd_error_handler)
3770 (_("%B(%s): warning: interworking not enabled.\n"
3771 " first occurrence: %B: ARM call to Thumb"),
3772 sym_sec->owner, input_bfd, name);
3775 /* We have an extra 2 bytes of reach because of
3776 the mode change (bit 24 (H) of the BLX encoding). */
3777 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3778 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3779 || (r_type == R_ARM_CALL && !globals->use_blx)
3780 || (r_type == R_ARM_JUMP24)
3781 || (r_type == R_ARM_PLT32))
3783 stub_type = (info->shared | globals->pic_veneer)
3785 ? ((globals->use_blx)
3786 /* V5T and above. */
3787 ? arm_stub_long_branch_any_thumb_pic
3789 : arm_stub_long_branch_v4t_arm_thumb_pic)
3791 /* non-PIC stubs. */
3792 : ((globals->use_blx)
3793 /* V5T and above. */
3794 ? arm_stub_long_branch_any_any
3796 : arm_stub_long_branch_v4t_arm_thumb);
3802 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3803 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3806 (info->shared | globals->pic_veneer)
3808 ? (r_type == R_ARM_TLS_CALL
3810 ? arm_stub_long_branch_any_tls_pic
3811 : arm_stub_long_branch_any_arm_pic)
3812 /* non-PIC stubs. */
3813 : arm_stub_long_branch_any_any;
3818 /* If a stub is needed, record the actual destination type. */
3819 if (stub_type != arm_stub_none)
3820 *actual_branch_type = branch_type;
3825 /* Build a name for an entry in the stub hash table. */
3828 elf32_arm_stub_name (const asection *input_section,
3829 const asection *sym_sec,
3830 const struct elf32_arm_link_hash_entry *hash,
3831 const Elf_Internal_Rela *rel,
3832 enum elf32_arm_stub_type stub_type)
3839 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3840 stub_name = (char *) bfd_malloc (len);
3841 if (stub_name != NULL)
3842 sprintf (stub_name, "%08x_%s+%x_%d",
3843 input_section->id & 0xffffffff,
3844 hash->root.root.root.string,
3845 (int) rel->r_addend & 0xffffffff,
3850 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3851 stub_name = (char *) bfd_malloc (len);
3852 if (stub_name != NULL)
3853 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3854 input_section->id & 0xffffffff,
3855 sym_sec->id & 0xffffffff,
3856 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
3857 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
3858 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3859 (int) rel->r_addend & 0xffffffff,
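/* For example (values invented for illustration): a branch to the
   global symbol "printf" from the input section with id 0x12, addend 0
   and stub type 3 gets the hash-table key "00000012_printf+0_3", while
   a branch to a local symbol uses the second form, e.g.
   "00000012_1f:2a+0_3" with the section id and symbol index in hex.  */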
3866 /* Look up an entry in the stub hash. Stub entries are cached because
3867 creating the stub name takes a bit of time. */
3869 static struct elf32_arm_stub_hash_entry *
3870 elf32_arm_get_stub_entry (const asection *input_section,
3871 const asection *sym_sec,
3872 struct elf_link_hash_entry *hash,
3873 const Elf_Internal_Rela *rel,
3874 struct elf32_arm_link_hash_table *htab,
3875 enum elf32_arm_stub_type stub_type)
3877 struct elf32_arm_stub_hash_entry *stub_entry;
3878 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3879 const asection *id_sec;
3881 if ((input_section->flags & SEC_CODE) == 0)
3884 /* If this input section is part of a group of sections sharing one
3885 stub section, then use the id of the first section in the group.
3886 Stub names need to include a section id, as there may well be
3887 more than one stub used to reach, say, printf, and we need to
3888 distinguish between them. */
3889 id_sec = htab->stub_group[input_section->id].link_sec;
3891 if (h != NULL && h->stub_cache != NULL
3892 && h->stub_cache->h == h
3893 && h->stub_cache->id_sec == id_sec
3894 && h->stub_cache->stub_type == stub_type)
3896 stub_entry = h->stub_cache;
3902 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3903 if (stub_name == NULL)
3906 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3907 stub_name, FALSE, FALSE);
3909 h->stub_cache = stub_entry;
3917 /* Find or create a stub section. Returns a pointer to the stub section, and
3918 the section to which the stub section will be attached (in *LINK_SEC_P).
3919 LINK_SEC_P may be NULL. */
3922 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3923 struct elf32_arm_link_hash_table *htab)
3928 link_sec = htab->stub_group[section->id].link_sec;
3929 BFD_ASSERT (link_sec != NULL);
3930 stub_sec = htab->stub_group[section->id].stub_sec;
3932 if (stub_sec == NULL)
3934 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3935 if (stub_sec == NULL)
3941 namelen = strlen (link_sec->name);
3942 len = namelen + sizeof (STUB_SUFFIX);
3943 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3947 memcpy (s_name, link_sec->name, namelen);
3948 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3949 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3950 if (stub_sec == NULL)
3952 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3954 htab->stub_group[section->id].stub_sec = stub_sec;
3958 *link_sec_p = link_sec;
3963 /* Add a new stub entry to the stub hash. Not all fields of the new
3964 stub entry are initialised. */
3966 static struct elf32_arm_stub_hash_entry *
3967 elf32_arm_add_stub (const char *stub_name,
3969 struct elf32_arm_link_hash_table *htab)
3973 struct elf32_arm_stub_hash_entry *stub_entry;
3975 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3976 if (stub_sec == NULL)
3979 /* Enter this entry into the linker stub hash table. */
3980 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3982 if (stub_entry == NULL)
3984 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3990 stub_entry->stub_sec = stub_sec;
3991 stub_entry->stub_offset = 0;
3992 stub_entry->id_sec = link_sec;
3997 /* Store an Arm insn into an output section not processed by
3998 elf32_arm_write_section. */
4001 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4002 bfd * output_bfd, bfd_vma val, void * ptr)
4004 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4005 bfd_putl32 (val, ptr);
4007 bfd_putb32 (val, ptr);
4010 /* Store a 16-bit Thumb insn into an output section not processed by
4011 elf32_arm_write_section. */
4014 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4015 bfd * output_bfd, bfd_vma val, void * ptr)
4017 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4018 bfd_putl16 (val, ptr);
4020 bfd_putb16 (val, ptr);
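/* Typical use of the helpers above (illustrative; CONTENTS and OFFSET
   are placeholders for the caller's buffer): writing an ARM NOP and a
   Thumb NOP into a glue/stub area honours the byteswap_code setting
   used for BE8 images automatically:

     put_arm_insn (htab, output_bfd, 0xe1a00000, contents + offset);
     put_thumb_insn (htab, output_bfd, 0x46c0, contents + offset + 4);
*/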
4023 /* If it's possible to change R_TYPE to a more efficient access
4024 model, return the new reloc type. */
4027 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4028 struct elf_link_hash_entry *h)
4030 int is_local = (h == NULL);
4032 if (info->shared || (h && h->root.type == bfd_link_hash_undefweak))
4035 /* We do not support relaxations for Old TLS models. */
4038 case R_ARM_TLS_GOTDESC:
4039 case R_ARM_TLS_CALL:
4040 case R_ARM_THM_TLS_CALL:
4041 case R_ARM_TLS_DESCSEQ:
4042 case R_ARM_THM_TLS_DESCSEQ:
4043 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
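/* Concretely (following the code above): when not linking a shared
   object and the symbol is not undefined-weak, a TLS descriptor
   relocation such as R_ARM_TLS_GOTDESC relaxes to R_ARM_TLS_LE32 if the
   symbol is local and to R_ARM_TLS_IE32 otherwise; in a shared link the
   original relocation type is kept.  */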
4049 static bfd_reloc_status_type elf32_arm_final_link_relocate
4050 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4051 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4052 const char *, unsigned char, enum arm_st_branch_type,
4053 struct elf_link_hash_entry *, bfd_boolean *, char **);
4056 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4060 case arm_stub_a8_veneer_b_cond:
4061 case arm_stub_a8_veneer_b:
4062 case arm_stub_a8_veneer_bl:
4065 case arm_stub_long_branch_any_any:
4066 case arm_stub_long_branch_v4t_arm_thumb:
4067 case arm_stub_long_branch_thumb_only:
4068 case arm_stub_long_branch_v4t_thumb_thumb:
4069 case arm_stub_long_branch_v4t_thumb_arm:
4070 case arm_stub_short_branch_v4t_thumb_arm:
4071 case arm_stub_long_branch_any_arm_pic:
4072 case arm_stub_long_branch_any_thumb_pic:
4073 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4074 case arm_stub_long_branch_v4t_arm_thumb_pic:
4075 case arm_stub_long_branch_v4t_thumb_arm_pic:
4076 case arm_stub_long_branch_thumb_only_pic:
4077 case arm_stub_long_branch_any_tls_pic:
4078 case arm_stub_long_branch_v4t_thumb_tls_pic:
4079 case arm_stub_a8_veneer_blx:
4083 abort (); /* Should be unreachable. */
4088 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4092 struct elf32_arm_stub_hash_entry *stub_entry;
4093 struct elf32_arm_link_hash_table *globals;
4094 struct bfd_link_info *info;
4101 const insn_sequence *template_sequence;
4103 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4104 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4107 /* Massage our args to the form they really have. */
4108 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4109 info = (struct bfd_link_info *) in_arg;
4111 globals = elf32_arm_hash_table (info);
4112 if (globals == NULL)
4115 stub_sec = stub_entry->stub_sec;
4117 if ((globals->fix_cortex_a8 < 0)
4118 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4119 /* We have to do less-strictly-aligned fixes last. */
4122 /* Make a note of the offset within the stubs for this entry. */
4123 stub_entry->stub_offset = stub_sec->size;
4124 loc = stub_sec->contents + stub_entry->stub_offset;
4126 stub_bfd = stub_sec->owner;
4128 /* This is the address of the stub destination. */
4129 sym_value = (stub_entry->target_value
4130 + stub_entry->target_section->output_offset
4131 + stub_entry->target_section->output_section->vma);
4133 template_sequence = stub_entry->stub_template;
4134 template_size = stub_entry->stub_template_size;
4137 for (i = 0; i < template_size; i++)
4139 switch (template_sequence[i].type)
4143 bfd_vma data = (bfd_vma) template_sequence[i].data;
4144 if (template_sequence[i].reloc_addend != 0)
4146 /* We've borrowed the reloc_addend field to mean we should
4147 insert a condition code into this (Thumb-1 branch)
4148 instruction. See THUMB16_BCOND_INSN. */
4149 BFD_ASSERT ((data & 0xff00) == 0xd000);
4150 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4152 bfd_put_16 (stub_bfd, data, loc + size);
4158 bfd_put_16 (stub_bfd,
4159 (template_sequence[i].data >> 16) & 0xffff,
4161 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4163 if (template_sequence[i].r_type != R_ARM_NONE)
4165 stub_reloc_idx[nrelocs] = i;
4166 stub_reloc_offset[nrelocs++] = size;
4172 bfd_put_32 (stub_bfd, template_sequence[i].data,
4174 /* Handle cases where the target is encoded within the instruction. */
4176 if (template_sequence[i].r_type == R_ARM_JUMP24)
4178 stub_reloc_idx[nrelocs] = i;
4179 stub_reloc_offset[nrelocs++] = size;
4185 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4186 stub_reloc_idx[nrelocs] = i;
4187 stub_reloc_offset[nrelocs++] = size;
4197 stub_sec->size += size;
4199 /* Stub size has already been computed in arm_size_one_stub. Check consistency. */
4201 BFD_ASSERT (size == stub_entry->stub_size);
4203 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4204 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4207 /* Assume there is at least one and at most MAXRELOCS entries to relocate in each stub. */
4209 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4211 for (i = 0; i < nrelocs; i++)
4212 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
4213 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
4214 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
4215 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
4217 Elf_Internal_Rela rel;
4218 bfd_boolean unresolved_reloc;
4219 char *error_message;
4220 enum arm_st_branch_type branch_type
4221 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22
4222 ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM);
4223 bfd_vma points_to = sym_value + stub_entry->target_addend;
4225 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4226 rel.r_info = ELF32_R_INFO (0,
4227 template_sequence[stub_reloc_idx[i]].r_type);
4228 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
4230 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4231 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4232 template should refer back to the instruction after the original branch. */
4234 points_to = sym_value;
4236 /* There may be unintended consequences if this is not true. */
4237 BFD_ASSERT (stub_entry->h == NULL);
4239 /* Note: _bfd_final_link_relocate doesn't handle these relocations
4240 properly. We should probably use this function unconditionally,
4241 rather than only for certain relocations listed in the enclosing
4242 conditional, for the sake of consistency. */
4243 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4244 (template_sequence[stub_reloc_idx[i]].r_type),
4245 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4246 points_to, info, stub_entry->target_section, "", STT_FUNC,
4247 branch_type, (struct elf_link_hash_entry *) stub_entry->h,
4248 &unresolved_reloc, &error_message);
4252 Elf_Internal_Rela rel;
4253 bfd_boolean unresolved_reloc;
4254 char *error_message;
4255 bfd_vma points_to = sym_value + stub_entry->target_addend
4256 + template_sequence[stub_reloc_idx[i]].reloc_addend;
4258 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4259 rel.r_info = ELF32_R_INFO (0,
4260 template_sequence[stub_reloc_idx[i]].r_type);
4263 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4264 (template_sequence[stub_reloc_idx[i]].r_type),
4265 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4266 points_to, info, stub_entry->target_section, "", STT_FUNC,
4267 stub_entry->branch_type,
4268 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc, &error_message);
4276 /* Calculate the template, template size and instruction size for a stub.
4277 Return value is the instruction size. */
4280 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4281 const insn_sequence **stub_template,
4282 int *stub_template_size)
4284 const insn_sequence *template_sequence = NULL;
4285 int template_size = 0, i;
4288 template_sequence = stub_definitions[stub_type].template_sequence;
4290 *stub_template = template_sequence;
4292 template_size = stub_definitions[stub_type].template_size;
4293 if (stub_template_size)
4294 *stub_template_size = template_size;
4297 for (i = 0; i < template_size; i++)
4299 switch (template_sequence[i].type)
4320 /* As above, but don't actually build the stub. Just bump offset so
4321 we know stub section sizes. */
4324 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4325 void *in_arg ATTRIBUTE_UNUSED)
4327 struct elf32_arm_stub_hash_entry *stub_entry;
4328 const insn_sequence *template_sequence;
4329 int template_size, size;
4331 /* Massage our args to the form they really have. */
4332 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4334 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4335 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4337 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4340 stub_entry->stub_size = size;
4341 stub_entry->stub_template = template_sequence;
4342 stub_entry->stub_template_size = template_size;
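/* Round the stub size up to a multiple of 8 bytes, so that each stub starts
   on an 8-byte-aligned offset within its stub section.  */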
4344 size = (size + 7) & ~7;
4345 stub_entry->stub_sec->size += size;
4350 /* External entry points for sizing and building linker stubs. */
4352 /* Set up various things so that we can make a list of input sections
4353 for each output section included in the link. Returns -1 on error,
4354 0 when no stubs will be needed, and 1 on success. */
4357 elf32_arm_setup_section_lists (bfd *output_bfd,
4358 struct bfd_link_info *info)
4361 unsigned int bfd_count;
4362 int top_id, top_index;
4364 asection **input_list, **list;
4366 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4370 if (! is_elf_hash_table (htab))
4373 /* Count the number of input BFDs and find the top input section id. */
4374 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4376 input_bfd = input_bfd->link_next)
4379 for (section = input_bfd->sections;
4381 section = section->next)
4383 if (top_id < section->id)
4384 top_id = section->id;
4387 htab->bfd_count = bfd_count;
4389 amt = sizeof (struct map_stub) * (top_id + 1);
4390 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4391 if (htab->stub_group == NULL)
4393 htab->top_id = top_id;
4395 /* We can't use output_bfd->section_count here to find the top output
4396 section index as some sections may have been removed, and
4397 _bfd_strip_section_from_output doesn't renumber the indices. */
4398 for (section = output_bfd->sections, top_index = 0;
4400 section = section->next)
4402 if (top_index < section->index)
4403 top_index = section->index;
4406 htab->top_index = top_index;
4407 amt = sizeof (asection *) * (top_index + 1);
4408 input_list = (asection **) bfd_malloc (amt);
4409 htab->input_list = input_list;
4410 if (input_list == NULL)
4413 /* For sections we aren't interested in, mark their entries with a
4414 value we can check later. */
4415 list = input_list + top_index;
4417 *list = bfd_abs_section_ptr;
4418 while (list-- != input_list);
4420 for (section = output_bfd->sections;
4422 section = section->next)
4424 if ((section->flags & SEC_CODE) != 0)
4425 input_list[section->index] = NULL;
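/* Output sections we are interested in (those that can contain code and
   hence stubs) are marked with NULL here and have their lists built up by
   elf32_arm_next_input_section; everything else keeps the
   bfd_abs_section_ptr marker set above.  */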
4431 /* The linker repeatedly calls this function for each input section,
4432 in the order that input sections are linked into output sections.
4433 Build lists of input sections to determine groupings between which
4434 we may insert linker stubs. */
4437 elf32_arm_next_input_section (struct bfd_link_info *info,
4440 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4445 if (isec->output_section->index <= htab->top_index)
4447 asection **list = htab->input_list + isec->output_section->index;
4449 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4451 /* Steal the link_sec pointer for our list. */
4452 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4453 /* This happens to make the list in reverse order,
4454 which we reverse later. */
4455 PREV_SEC (isec) = *list;
4461 /* See whether we can group stub sections together. Grouping stub
4462 sections may result in fewer stubs. More importantly, we need to
4463 put all .init* and .fini* stubs at the end of the .init or
4464 .fini output sections respectively, because glibc splits the
4465 _init and _fini functions into multiple parts. Putting a stub in
4466 the middle of a function is not a good idea. */
4469 group_sections (struct elf32_arm_link_hash_table *htab,
4470 bfd_size_type stub_group_size,
4471 bfd_boolean stubs_always_after_branch)
4473 asection **list = htab->input_list;
4477 asection *tail = *list;
4480 if (tail == bfd_abs_section_ptr)
4483 /* Reverse the list: we must avoid placing stubs at the
4484 beginning of the section because the beginning of the text
4485 section may be required for an interrupt vector in bare metal code. */
4487 #define NEXT_SEC PREV_SEC
4489 while (tail != NULL)
4491 /* Pop from tail. */
4492 asection *item = tail;
4493 tail = PREV_SEC (item);
4496 NEXT_SEC (item) = head;
4500 while (head != NULL)
4504 bfd_vma stub_group_start = head->output_offset;
4505 bfd_vma end_of_next;
4508 while (NEXT_SEC (curr) != NULL)
4510 next = NEXT_SEC (curr);
4511 end_of_next = next->output_offset + next->size;
4512 if (end_of_next - stub_group_start >= stub_group_size)
4513 /* End of NEXT is too far from start, so stop. */
4515 /* Add NEXT to the group. */
4519 /* OK, the size from the start to the start of CURR is less
4520 than stub_group_size and thus can be handled by one stub
4521 section. (Or the head section is itself larger than
4522 stub_group_size, in which case we may be toast.)
4523 We should really be keeping track of the total size of
4524 stubs added here, as stubs contribute to the final output section size. */
4528 next = NEXT_SEC (head);
4529 /* Set up this stub group. */
4530 htab->stub_group[head->id].link_sec = curr;
4532 while (head != curr && (head = next) != NULL);
4534 /* But wait, there's more! Input sections up to stub_group_size
4535 bytes after the stub section can be handled by it too. */
4536 if (!stubs_always_after_branch)
4538 stub_group_start = curr->output_offset + curr->size;
4540 while (next != NULL)
4542 end_of_next = next->output_offset + next->size;
4543 if (end_of_next - stub_group_start >= stub_group_size)
4544 /* End of NEXT is too far from stubs, so stop. */
4546 /* Add NEXT to the stub group. */
4548 next = NEXT_SEC (head);
4549 htab->stub_group[head->id].link_sec = curr;
4555 while (list++ != htab->input_list + htab->top_index);
4557 free (htab->input_list);
4562 /* Comparison function for sorting/searching relocations relating to Cortex-A8 erratum. */
4566 a8_reloc_compare (const void *a, const void *b)
4568 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4569 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4571 if (ra->from < rb->from)
4573 else if (ra->from > rb->from)
4579 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4580 const char *, char **);
4582 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4583 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4584 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false otherwise. */
4588 cortex_a8_erratum_scan (bfd *input_bfd,
4589 struct bfd_link_info *info,
4590 struct a8_erratum_fix **a8_fixes_p,
4591 unsigned int *num_a8_fixes_p,
4592 unsigned int *a8_fix_table_size_p,
4593 struct a8_erratum_reloc *a8_relocs,
4594 unsigned int num_a8_relocs,
4595 unsigned prev_num_a8_fixes,
4596 bfd_boolean *stub_changed_p)
4599 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4600 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4601 unsigned int num_a8_fixes = *num_a8_fixes_p;
4602 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4607 for (section = input_bfd->sections;
4609 section = section->next)
4611 bfd_byte *contents = NULL;
4612 struct _arm_elf_section_data *sec_data;
4616 if (elf_section_type (section) != SHT_PROGBITS
4617 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4618 || (section->flags & SEC_EXCLUDE) != 0
4619 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4620 || (section->output_section == bfd_abs_section_ptr))
4623 base_vma = section->output_section->vma + section->output_offset;
4625 if (elf_section_data (section)->this_hdr.contents != NULL)
4626 contents = elf_section_data (section)->this_hdr.contents;
4627 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4630 sec_data = elf32_arm_section_data (section);
4632 for (span = 0; span < sec_data->mapcount; span++)
4634 unsigned int span_start = sec_data->map[span].vma;
4635 unsigned int span_end = (span == sec_data->mapcount - 1)
4636 ? section->size : sec_data->map[span + 1].vma;
4638 char span_type = sec_data->map[span].type;
4639 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4641 if (span_type != 't')
4644 /* Span is entirely within a single 4KB region: skip scanning. */
4645 if (((base_vma + span_start) & ~0xfff)
4646 == ((base_vma + span_end) & ~0xfff))
4649 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4651 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4652 * The branch target is in the same 4KB region as the
4653 first half of the branch.
4654 * The instruction before the branch is a 32-bit
4655 length non-branch instruction. */
4656 for (i = span_start; i < span_end;)
4658 unsigned int insn = bfd_getl16 (&contents[i]);
4659 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4660 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4662 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4667 /* Load the rest of the insn (in manual-friendly order). */
4668 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4670 /* Encoding T4: B<c>.W. */
4671 is_b = (insn & 0xf800d000) == 0xf0009000;
4672 /* Encoding T1: BL<c>.W. */
4673 is_bl = (insn & 0xf800d000) == 0xf000d000;
4674 /* Encoding T2: BLX<c>.W. */
4675 is_blx = (insn & 0xf800d000) == 0xf000c000;
4676 /* Encoding T3: B<c>.W (not permitted in IT block). */
4677 is_bcc = (insn & 0xf800d000) == 0xf0008000
4678 && (insn & 0x07f00000) != 0x03800000;
4681 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
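/* The erratum can only trigger when a 32-bit Thumb-2 branch straddles a 4KB
   page boundary, i.e. its first halfword sits at offset 0xffe within the
   page, and the instruction before it was a 32-bit non-branch instruction.  */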
4683 if (((base_vma + i) & 0xfff) == 0xffe
4687 && ! last_was_branch)
4689 bfd_signed_vma offset = 0;
4690 bfd_boolean force_target_arm = FALSE;
4691 bfd_boolean force_target_thumb = FALSE;
4693 enum elf32_arm_stub_type stub_type = arm_stub_none;
4694 struct a8_erratum_reloc key, *found;
4695 bfd_boolean use_plt = FALSE;
4697 key.from = base_vma + i;
4698 found = (struct a8_erratum_reloc *)
4699 bsearch (&key, a8_relocs, num_a8_relocs,
4700 sizeof (struct a8_erratum_reloc),
4705 char *error_message = NULL;
4706 struct elf_link_hash_entry *entry;
4708 /* We don't care about the error returned from this
4709 function, only if there is glue or not. */
4710 entry = find_thumb_glue (info, found->sym_name,
4714 found->non_a8_stub = TRUE;
4716 /* Keep a simpler condition, for the sake of clarity. */
4717 if (htab->root.splt != NULL && found->hash != NULL
4718 && found->hash->root.plt.offset != (bfd_vma) -1)
4721 if (found->r_type == R_ARM_THM_CALL)
4723 if (found->branch_type == ST_BRANCH_TO_ARM
4725 force_target_arm = TRUE;
4727 force_target_thumb = TRUE;
4731 /* Check if we have an offending branch instruction. */
4733 if (found && found->non_a8_stub)
4734 /* We've already made a stub for this instruction, e.g.
4735 it's a long branch or a Thumb->ARM stub. Assume that
4736 stub will suffice to work around the A8 erratum (see
4737 setting of always_after_branch above). */
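/* Otherwise reconstruct the branch offset from the instruction itself.  For a
   conditional branch (encoding T3) the offset is S:J2:J1:imm6:imm11:'0',
   reassembled and sign-extended from bit 20 below.  */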
4741 offset = (insn & 0x7ff) << 1;
4742 offset |= (insn & 0x3f0000) >> 4;
4743 offset |= (insn & 0x2000) ? 0x40000 : 0;
4744 offset |= (insn & 0x800) ? 0x80000 : 0;
4745 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4746 if (offset & 0x100000)
4747 offset |= ~ ((bfd_signed_vma) 0xfffff);
4748 stub_type = arm_stub_a8_veneer_b_cond;
4750 else if (is_b || is_bl || is_blx)
4752 int s = (insn & 0x4000000) != 0;
4753 int j1 = (insn & 0x2000) != 0;
4754 int j2 = (insn & 0x800) != 0;
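/* For encodings T1 (BL), T2 (BLX) and T4 (B.W) the branch offset is
   S:I1:I2:imm10:imm11:'0', where I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S);
   s, j1 and j2 above are the raw bits used to reconstruct it.  */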
4758 offset = (insn & 0x7ff) << 1;
4759 offset |= (insn & 0x3ff0000) >> 4;
4763 if (offset & 0x1000000)
4764 offset |= ~ ((bfd_signed_vma) 0xffffff);
4767 offset &= ~ ((bfd_signed_vma) 3);
4769 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4770 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4773 if (stub_type != arm_stub_none)
4775 bfd_vma pc_for_insn = base_vma + i + 4;
4777 /* The original instruction is a BL, but the target is
4778 an ARM instruction. If we were not making a stub,
4779 the BL would have been converted to a BLX. Use the
4780 BLX stub instead in that case. */
4781 if (htab->use_blx && force_target_arm
4782 && stub_type == arm_stub_a8_veneer_bl)
4784 stub_type = arm_stub_a8_veneer_blx;
4788 /* Conversely, if the original instruction was
4789 BLX but the target is Thumb mode, use the BL stub. */
4791 else if (force_target_thumb
4792 && stub_type == arm_stub_a8_veneer_blx)
4794 stub_type = arm_stub_a8_veneer_bl;
4800 pc_for_insn &= ~ ((bfd_vma) 3);
4802 /* If we found a relocation, use the proper destination,
4803 not the offset in the (unrelocated) instruction.
4804 Note this is always done if we switched the stub type above. */
4808 (bfd_signed_vma) (found->destination - pc_for_insn);
4810 /* If the stub will use a Thumb-mode branch to a
4811 PLT target, redirect it to the preceding Thumb entry point. */
4813 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
4814 offset -= PLT_THUMB_STUB_SIZE;
4816 target = pc_for_insn + offset;
4818 /* The BLX stub is ARM-mode code. Adjust the offset to
4819 take the different PC value (+8 instead of +4) into account. */
4821 if (stub_type == arm_stub_a8_veneer_blx)
4824 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4826 char *stub_name = NULL;
4828 if (num_a8_fixes == a8_fix_table_size)
4830 a8_fix_table_size *= 2;
4831 a8_fixes = (struct a8_erratum_fix *)
4832 bfd_realloc (a8_fixes,
4833 sizeof (struct a8_erratum_fix)
4834 * a8_fix_table_size);
4837 if (num_a8_fixes < prev_num_a8_fixes)
4839 /* If we're doing a subsequent scan,
4840 check if we've found the same fix as
4841 before, and try and reuse the stub name. */
4843 stub_name = a8_fixes[num_a8_fixes].stub_name;
4844 if ((a8_fixes[num_a8_fixes].section != section)
4845 || (a8_fixes[num_a8_fixes].offset != i))
4849 *stub_changed_p = TRUE;
4855 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4856 if (stub_name != NULL)
4857 sprintf (stub_name, "%x:%x", section->id, i);
4860 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4861 a8_fixes[num_a8_fixes].section = section;
4862 a8_fixes[num_a8_fixes].offset = i;
4863 a8_fixes[num_a8_fixes].addend = offset;
4864 a8_fixes[num_a8_fixes].orig_insn = insn;
4865 a8_fixes[num_a8_fixes].stub_name = stub_name;
4866 a8_fixes[num_a8_fixes].stub_type = stub_type;
4867 a8_fixes[num_a8_fixes].branch_type =
4868 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
4875 i += insn_32bit ? 4 : 2;
4876 last_was_32bit = insn_32bit;
4877 last_was_branch = is_32bit_branch;
4881 if (elf_section_data (section)->this_hdr.contents == NULL)
4885 *a8_fixes_p = a8_fixes;
4886 *num_a8_fixes_p = num_a8_fixes;
4887 *a8_fix_table_size_p = a8_fix_table_size;
4892 /* Determine and set the size of the stub section for a final link.
4894 The basic idea here is to examine all the relocations looking for
4895 PC-relative calls to a target that is unreachable with a "bl" instruction. */
4899 elf32_arm_size_stubs (bfd *output_bfd,
4901 struct bfd_link_info *info,
4902 bfd_signed_vma group_size,
4903 asection * (*add_stub_section) (const char *, asection *),
4904 void (*layout_sections_again) (void))
4906 bfd_size_type stub_group_size;
4907 bfd_boolean stubs_always_after_branch;
4908 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4909 struct a8_erratum_fix *a8_fixes = NULL;
4910 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4911 struct a8_erratum_reloc *a8_relocs = NULL;
4912 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4917 if (htab->fix_cortex_a8)
4919 a8_fixes = (struct a8_erratum_fix *)
4920 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4921 a8_relocs = (struct a8_erratum_reloc *)
4922 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4925 /* Propagate mach to stub bfd, because it may not have been
4926 finalized when we created stub_bfd. */
4927 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4928 bfd_get_mach (output_bfd));
4930 /* Stash our params away. */
4931 htab->stub_bfd = stub_bfd;
4932 htab->add_stub_section = add_stub_section;
4933 htab->layout_sections_again = layout_sections_again;
4934 stubs_always_after_branch = group_size < 0;
4936 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4937 as the first half of a 32-bit branch straddling two 4K pages. This is a
4938 crude way of enforcing that. */
4939 if (htab->fix_cortex_a8)
4940 stubs_always_after_branch = 1;
4943 stub_group_size = -group_size;
4945 stub_group_size = group_size;
4947 if (stub_group_size == 1)
4949 /* Default values. */
4950 /* The Thumb branch range of +-4MB has to be used as the default
4951 maximum size (a given section can contain both ARM and Thumb
4952 code, so the worst case has to be taken into account).
4954 This value is 24K less than that, which allows for 2025
4955 12-byte stubs. If we exceed that, then we will fail to link.
4956 The user will have to relink with an explicit group size option. */
4958 stub_group_size = 4170000;
4961 group_sections (htab, stub_group_size, stubs_always_after_branch);
4963 /* If we're applying the cortex A8 fix, we need to determine the
4964 program header size now, because we cannot change it later --
4965 that could alter section placements. Notice the A8 erratum fix
4966 ends up requiring the section addresses to remain unchanged
4967 modulo the page size. That's something we cannot represent
4968 inside BFD, and we don't want to force the section alignment to
4969 be the page size. */
4970 if (htab->fix_cortex_a8)
4971 (*htab->layout_sections_again) ();
4976 unsigned int bfd_indx;
4978 bfd_boolean stub_changed = FALSE;
4979 unsigned prev_num_a8_fixes = num_a8_fixes;
4982 for (input_bfd = info->input_bfds, bfd_indx = 0;
4984 input_bfd = input_bfd->link_next, bfd_indx++)
4986 Elf_Internal_Shdr *symtab_hdr;
4988 Elf_Internal_Sym *local_syms = NULL;
4990 if (!is_arm_elf (input_bfd))
4995 /* We'll need the symbol table in a second. */
4996 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4997 if (symtab_hdr->sh_info == 0)
5000 /* Walk over each section attached to the input bfd. */
5001 for (section = input_bfd->sections;
5003 section = section->next)
5005 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5007 /* If there aren't any relocs, then there's nothing more to do. */
5009 if ((section->flags & SEC_RELOC) == 0
5010 || section->reloc_count == 0
5011 || (section->flags & SEC_CODE) == 0)
5014 /* If this section is a link-once section that will be
5015 discarded, then don't create any stubs. */
5016 if (section->output_section == NULL
5017 || section->output_section->owner != output_bfd)
5020 /* Get the relocs. */
5022 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5023 NULL, info->keep_memory);
5024 if (internal_relocs == NULL)
5025 goto error_ret_free_local;
5027 /* Now examine each relocation. */
5028 irela = internal_relocs;
5029 irelaend = irela + section->reloc_count;
5030 for (; irela < irelaend; irela++)
5032 unsigned int r_type, r_indx;
5033 enum elf32_arm_stub_type stub_type;
5034 struct elf32_arm_stub_hash_entry *stub_entry;
5037 bfd_vma destination;
5038 struct elf32_arm_link_hash_entry *hash;
5039 const char *sym_name;
5041 const asection *id_sec;
5042 unsigned char st_type;
5043 enum arm_st_branch_type branch_type;
5044 bfd_boolean created_stub = FALSE;
5046 r_type = ELF32_R_TYPE (irela->r_info);
5047 r_indx = ELF32_R_SYM (irela->r_info);
5049 if (r_type >= (unsigned int) R_ARM_max)
5051 bfd_set_error (bfd_error_bad_value);
5052 error_ret_free_internal:
5053 if (elf_section_data (section)->relocs == NULL)
5054 free (internal_relocs);
5055 goto error_ret_free_local;
5059 if (r_indx >= symtab_hdr->sh_info)
5060 hash = elf32_arm_hash_entry
5061 (elf_sym_hashes (input_bfd)
5062 [r_indx - symtab_hdr->sh_info]);
5064 /* Only look for stubs on branch instructions, or
5065 non-relaxed TLSCALL */
5066 if ((r_type != (unsigned int) R_ARM_CALL)
5067 && (r_type != (unsigned int) R_ARM_THM_CALL)
5068 && (r_type != (unsigned int) R_ARM_JUMP24)
5069 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5070 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5071 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5072 && (r_type != (unsigned int) R_ARM_PLT32)
5073 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5074 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5075 && r_type == elf32_arm_tls_transition
5076 (info, r_type, &hash->root)
5077 && ((hash ? hash->tls_type
5078 : (elf32_arm_local_got_tls_type
5079 (input_bfd)[r_indx]))
5080 & GOT_TLS_GDESC) != 0))
5090 /* Now determine the call target, its name, value, section and type. */
5090 if (r_type == (unsigned int) R_ARM_TLS_CALL
5091 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5093 /* A non-relaxed TLS call. The target is the
5094 plt-resident trampoline and nothing to do with the symbol. */
5096 BFD_ASSERT (htab->tls_trampoline > 0);
5097 sym_sec = htab->root.splt;
5098 sym_value = htab->tls_trampoline;
5101 branch_type = ST_BRANCH_TO_ARM;
5105 /* It's a local symbol. */
5106 Elf_Internal_Sym *sym;
5108 if (local_syms == NULL)
5111 = (Elf_Internal_Sym *) symtab_hdr->contents;
5112 if (local_syms == NULL)
5114 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5115 symtab_hdr->sh_info, 0,
5117 if (local_syms == NULL)
5118 goto error_ret_free_internal;
5121 sym = local_syms + r_indx;
5122 if (sym->st_shndx == SHN_UNDEF)
5123 sym_sec = bfd_und_section_ptr;
5124 else if (sym->st_shndx == SHN_ABS)
5125 sym_sec = bfd_abs_section_ptr;
5126 else if (sym->st_shndx == SHN_COMMON)
5127 sym_sec = bfd_com_section_ptr;
5130 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5133 /* This is an undefined symbol. It can never be resolved. */
5137 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5138 sym_value = sym->st_value;
5139 destination = (sym_value + irela->r_addend
5140 + sym_sec->output_offset
5141 + sym_sec->output_section->vma);
5142 st_type = ELF_ST_TYPE (sym->st_info);
5143 branch_type = ARM_SYM_BRANCH_TYPE (sym);
5145 = bfd_elf_string_from_elf_section (input_bfd,
5146 symtab_hdr->sh_link,
5151 /* It's an external symbol. */
5152 while (hash->root.root.type == bfd_link_hash_indirect
5153 || hash->root.root.type == bfd_link_hash_warning)
5154 hash = ((struct elf32_arm_link_hash_entry *)
5155 hash->root.root.u.i.link);
5157 if (hash->root.root.type == bfd_link_hash_defined
5158 || hash->root.root.type == bfd_link_hash_defweak)
5160 sym_sec = hash->root.root.u.def.section;
5161 sym_value = hash->root.root.u.def.value;
5163 struct elf32_arm_link_hash_table *globals =
5164 elf32_arm_hash_table (info);
5166 /* For a destination in a shared library,
5167 use the PLT stub as target address to
5168 decide whether a branch stub is needed. */
5171 && globals->root.splt != NULL
5173 && hash->root.plt.offset != (bfd_vma) -1)
5175 sym_sec = globals->root.splt;
5176 sym_value = hash->root.plt.offset;
5177 if (sym_sec->output_section != NULL)
5178 destination = (sym_value
5179 + sym_sec->output_offset
5180 + sym_sec->output_section->vma);
5182 else if (sym_sec->output_section != NULL)
5183 destination = (sym_value + irela->r_addend
5184 + sym_sec->output_offset
5185 + sym_sec->output_section->vma);
5187 else if ((hash->root.root.type == bfd_link_hash_undefined)
5188 || (hash->root.root.type == bfd_link_hash_undefweak))
5190 /* For a shared library, use the PLT stub as
5191 target address to decide whether a long
5192 branch stub is needed.
5193 For absolute code, they cannot be handled. */
5194 struct elf32_arm_link_hash_table *globals =
5195 elf32_arm_hash_table (info);
5198 && globals->root.splt != NULL
5200 && hash->root.plt.offset != (bfd_vma) -1)
5202 sym_sec = globals->root.splt;
5203 sym_value = hash->root.plt.offset;
5204 if (sym_sec->output_section != NULL)
5205 destination = (sym_value
5206 + sym_sec->output_offset
5207 + sym_sec->output_section->vma);
5214 bfd_set_error (bfd_error_bad_value);
5215 goto error_ret_free_internal;
5217 st_type = hash->root.type;
5218 branch_type = hash->root.target_internal;
5219 sym_name = hash->root.root.root.string;
5224 /* Determine what (if any) linker stub is needed. */
5225 stub_type = arm_type_of_stub (info, section, irela,
5226 st_type, &branch_type,
5227 hash, destination, sym_sec,
5228 input_bfd, sym_name);
5229 if (stub_type == arm_stub_none)
5232 /* Support for grouping stub sections. */
5233 id_sec = htab->stub_group[section->id].link_sec;
5235 /* Get the name of this stub. */
5236 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
5239 goto error_ret_free_internal;
5241 /* We've either created a stub for this reloc already,
5242 or we are about to. */
5243 created_stub = TRUE;
5245 stub_entry = arm_stub_hash_lookup
5246 (&htab->stub_hash_table, stub_name,
5248 if (stub_entry != NULL)
5250 /* The proper stub has already been created. */
5252 stub_entry->target_value = sym_value;
5256 stub_entry = elf32_arm_add_stub (stub_name, section,
5258 if (stub_entry == NULL)
5261 goto error_ret_free_internal;
5264 stub_entry->target_value = sym_value;
5265 stub_entry->target_section = sym_sec;
5266 stub_entry->stub_type = stub_type;
5267 stub_entry->h = hash;
5268 stub_entry->branch_type = branch_type;
5270 if (sym_name == NULL)
5271 sym_name = "unnamed";
5272 stub_entry->output_name = (char *)
5273 bfd_alloc (htab->stub_bfd,
5274 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5275 + strlen (sym_name));
5276 if (stub_entry->output_name == NULL)
5279 goto error_ret_free_internal;
5282 /* For historical reasons, use the existing names for
5283 ARM-to-Thumb and Thumb-to-ARM stubs. */
5284 if ((r_type == (unsigned int) R_ARM_THM_CALL
5285 || r_type == (unsigned int) R_ARM_THM_JUMP24)
5286 && branch_type == ST_BRANCH_TO_ARM)
5287 sprintf (stub_entry->output_name,
5288 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5289 else if ((r_type == (unsigned int) R_ARM_CALL
5290 || r_type == (unsigned int) R_ARM_JUMP24)
5291 && branch_type == ST_BRANCH_TO_THUMB)
5292 sprintf (stub_entry->output_name,
5293 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5295 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
5298 stub_changed = TRUE;
5302 /* Look for relocations which might trigger Cortex-A8 erratum. */
5304 if (htab->fix_cortex_a8
5305 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5306 || r_type == (unsigned int) R_ARM_THM_JUMP19
5307 || r_type == (unsigned int) R_ARM_THM_CALL
5308 || r_type == (unsigned int) R_ARM_THM_XPC22))
5310 bfd_vma from = section->output_section->vma
5311 + section->output_offset + irela->r_offset;
5314 if ((from & 0xfff) == 0xffe)
5316 /* Found a candidate. Note we haven't checked the
5317 destination is within 4K here: if we do so (and
5318 don't create an entry in a8_relocs) we can't tell
5319 that a branch should have been relocated when scanning later. */
5321 if (num_a8_relocs == a8_reloc_table_size)
5323 a8_reloc_table_size *= 2;
5324 a8_relocs = (struct a8_erratum_reloc *)
5325 bfd_realloc (a8_relocs,
5326 sizeof (struct a8_erratum_reloc)
5327 * a8_reloc_table_size);
5330 a8_relocs[num_a8_relocs].from = from;
5331 a8_relocs[num_a8_relocs].destination = destination;
5332 a8_relocs[num_a8_relocs].r_type = r_type;
5333 a8_relocs[num_a8_relocs].branch_type = branch_type;
5334 a8_relocs[num_a8_relocs].sym_name = sym_name;
5335 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5336 a8_relocs[num_a8_relocs].hash = hash;
5343 /* We're done with the internal relocs, free them. */
5344 if (elf_section_data (section)->relocs == NULL)
5345 free (internal_relocs);
5348 if (htab->fix_cortex_a8)
5350 /* Sort relocs which might apply to Cortex-A8 erratum. */
5351 qsort (a8_relocs, num_a8_relocs,
5352 sizeof (struct a8_erratum_reloc),
5355 /* Scan for branches which might trigger Cortex-A8 erratum. */
5356 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5357 &num_a8_fixes, &a8_fix_table_size,
5358 a8_relocs, num_a8_relocs,
5359 prev_num_a8_fixes, &stub_changed)
5361 goto error_ret_free_local;
5365 if (prev_num_a8_fixes != num_a8_fixes)
5366 stub_changed = TRUE;
5371 /* OK, we've added some stubs. Find out the new size of the stub sections. */
5373 for (stub_sec = htab->stub_bfd->sections;
5375 stub_sec = stub_sec->next)
5377 /* Ignore non-stub sections. */
5378 if (!strstr (stub_sec->name, STUB_SUFFIX))
5384 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5386 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5387 if (htab->fix_cortex_a8)
5388 for (i = 0; i < num_a8_fixes; i++)
5390 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5391 a8_fixes[i].section, htab);
5393 if (stub_sec == NULL)
5394 goto error_ret_free_local;
5397 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5402 /* Ask the linker to do its stuff. */
5403 (*htab->layout_sections_again) ();
5406 /* Add stubs for Cortex-A8 erratum fixes now. */
5407 if (htab->fix_cortex_a8)
5409 for (i = 0; i < num_a8_fixes; i++)
5411 struct elf32_arm_stub_hash_entry *stub_entry;
5412 char *stub_name = a8_fixes[i].stub_name;
5413 asection *section = a8_fixes[i].section;
5414 unsigned int section_id = a8_fixes[i].section->id;
5415 asection *link_sec = htab->stub_group[section_id].link_sec;
5416 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5417 const insn_sequence *template_sequence;
5418 int template_size, size = 0;
5420 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5422 if (stub_entry == NULL)
5424 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5430 stub_entry->stub_sec = stub_sec;
5431 stub_entry->stub_offset = 0;
5432 stub_entry->id_sec = link_sec;
5433 stub_entry->stub_type = a8_fixes[i].stub_type;
5434 stub_entry->target_section = a8_fixes[i].section;
5435 stub_entry->target_value = a8_fixes[i].offset;
5436 stub_entry->target_addend = a8_fixes[i].addend;
5437 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5438 stub_entry->branch_type = a8_fixes[i].branch_type;
5440 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5444 stub_entry->stub_size = size;
5445 stub_entry->stub_template = template_sequence;
5446 stub_entry->stub_template_size = template_size;
5449 /* Stash the Cortex-A8 erratum fix array for use later in
5450 elf32_arm_write_section(). */
5451 htab->a8_erratum_fixes = a8_fixes;
5452 htab->num_a8_erratum_fixes = num_a8_fixes;
5456 htab->a8_erratum_fixes = NULL;
5457 htab->num_a8_erratum_fixes = 0;
5461 error_ret_free_local:
5465 /* Build all the stubs associated with the current output file. The
5466 stubs are kept in a hash table attached to the main linker hash
5467 table. We also set up the .plt entries for statically linked PIC
5468 functions here. This function is called via arm_elf_finish in the linker. */
5472 elf32_arm_build_stubs (struct bfd_link_info *info)
5475 struct bfd_hash_table *table;
5476 struct elf32_arm_link_hash_table *htab;
5478 htab = elf32_arm_hash_table (info);
5482 for (stub_sec = htab->stub_bfd->sections;
5484 stub_sec = stub_sec->next)
5488 /* Ignore non-stub sections. */
5489 if (!strstr (stub_sec->name, STUB_SUFFIX))
5492 /* Allocate memory to hold the linker stubs. */
5493 size = stub_sec->size;
5494 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5495 if (stub_sec->contents == NULL && size != 0)
5500 /* Build the stubs as directed by the stub hash table. */
5501 table = &htab->stub_hash_table;
5502 bfd_hash_traverse (table, arm_build_one_stub, info);
5503 if (htab->fix_cortex_a8)
5505 /* Place the cortex a8 stubs last. */
5506 htab->fix_cortex_a8 = -1;
5507 bfd_hash_traverse (table, arm_build_one_stub, info);
5513 /* Locate the Thumb encoded calling stub for NAME. */
5515 static struct elf_link_hash_entry *
5516 find_thumb_glue (struct bfd_link_info *link_info,
5518 char **error_message)
5521 struct elf_link_hash_entry *hash;
5522 struct elf32_arm_link_hash_table *hash_table;
5524 /* We need a pointer to the armelf specific hash table. */
5525 hash_table = elf32_arm_hash_table (link_info);
5526 if (hash_table == NULL)
5529 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5530 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5532 BFD_ASSERT (tmp_name);
5534 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5536 hash = elf_link_hash_lookup
5537 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5540 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5541 tmp_name, name) == -1)
5542 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5549 /* Locate the ARM encoded calling stub for NAME. */
5551 static struct elf_link_hash_entry *
5552 find_arm_glue (struct bfd_link_info *link_info,
5554 char **error_message)
5557 struct elf_link_hash_entry *myh;
5558 struct elf32_arm_link_hash_table *hash_table;
5560 /* We need a pointer to the elfarm specific hash table. */
5561 hash_table = elf32_arm_hash_table (link_info);
5562 if (hash_table == NULL)
5565 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5566 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5568 BFD_ASSERT (tmp_name);
5570 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5572 myh = elf_link_hash_lookup
5573 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5576 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5577 tmp_name, name) == -1)
5578 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5585 /* ARM->Thumb glue (static images):
   .arm
   __func_from_arm:
5589 ldr r12, __func_addr
   bx  r12
   __func_addr:
5592 .word func    @ behave as if you saw an ARM_32 reloc.
   (v5t static images)
   .arm
   __func_from_arm:
   ldr pc, __func_addr
   __func_addr:
5599 .word func    @ behave as if you saw an ARM_32 reloc.
5601 (relocatable images)
   .arm
   __func_from_arm:
5604 ldr r12, __func_offset
   add r12, r12, pc
   bx  r12
   __func_offset:
   .word func - .   */
5610 #define ARM2THUMB_STATIC_GLUE_SIZE 12
5611 static const insn32 a2t1_ldr_insn = 0xe59fc000;
5612 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
5613 static const insn32 a2t3_func_addr_insn = 0x00000001;
5615 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
5616 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5617 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5619 #define ARM2THUMB_PIC_GLUE_SIZE 16
5620 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5621 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5622 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
5624 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
5628 __func_from_thumb: __func_from_thumb:
5630 nop ldr r6, __func_addr
5640 #define THUMB2ARM_GLUE_SIZE 8
5641 static const insn16 t2a1_bx_pc_insn = 0x4778;
5642 static const insn16 t2a2_noop_insn = 0x46c0;
5643 static const insn32 t2a3_b_insn = 0xea000000;
5645 #define VFP11_ERRATUM_VENEER_SIZE 8
5647 #define ARM_BX_VENEER_SIZE 12
5648 static const insn32 armbx1_tst_insn = 0xe3100001;
5649 static const insn32 armbx2_moveq_insn = 0x01a0f000;
5650 static const insn32 armbx3_bx_insn = 0xe12fff10;
5652 #ifndef ELFARM_NABI_C_INCLUDED
5654 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5657 bfd_byte * contents;
5661 /* Do not include empty glue sections in the output. */
5664 s = bfd_get_linker_section (abfd, name);
5666 s->flags |= SEC_EXCLUDE;
5671 BFD_ASSERT (abfd != NULL);
5673 s = bfd_get_linker_section (abfd, name);
5674 BFD_ASSERT (s != NULL);
5676 contents = (bfd_byte *) bfd_alloc (abfd, size);
5678 BFD_ASSERT (s->size == size);
5679 s->contents = contents;
5683 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5685 struct elf32_arm_link_hash_table * globals;
5687 globals = elf32_arm_hash_table (info);
5688 BFD_ASSERT (globals != NULL);
5690 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5691 globals->arm_glue_size,
5692 ARM2THUMB_GLUE_SECTION_NAME);
5694 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5695 globals->thumb_glue_size,
5696 THUMB2ARM_GLUE_SECTION_NAME);
5698 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5699 globals->vfp11_erratum_glue_size,
5700 VFP11_ERRATUM_VENEER_SECTION_NAME);
5702 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5703 globals->bx_glue_size,
5704 ARM_BX_GLUE_SECTION_NAME);
5709 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5710 Returns the symbol identifying the stub. */
5712 static struct elf_link_hash_entry *
5713 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5714 struct elf_link_hash_entry * h)
5716 const char * name = h->root.root.string;
5719 struct elf_link_hash_entry * myh;
5720 struct bfd_link_hash_entry * bh;
5721 struct elf32_arm_link_hash_table * globals;
5725 globals = elf32_arm_hash_table (link_info);
5726 BFD_ASSERT (globals != NULL);
5727 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5729 s = bfd_get_linker_section
5730 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5732 BFD_ASSERT (s != NULL);
5734 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5735 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5737 BFD_ASSERT (tmp_name);
5739 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5741 myh = elf_link_hash_lookup
5742 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5746 /* We've already seen this guy. */
5751 /* The only trick here is using hash_table->arm_glue_size as the value.
5752 Even though the section isn't allocated yet, this is where we will be
5753 putting it. The +1 on the value marks that the stub has not been
5754 output yet - not that it is a Thumb function. */
5756 val = globals->arm_glue_size + 1;
5757 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5758 tmp_name, BSF_GLOBAL, s, val,
5759 NULL, TRUE, FALSE, &bh);
5761 myh = (struct elf_link_hash_entry *) bh;
5762 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5763 myh->forced_local = 1;
5767 if (link_info->shared || globals->root.is_relocatable_executable
5768 || globals->pic_veneer)
5769 size = ARM2THUMB_PIC_GLUE_SIZE;
5770 else if (globals->use_blx)
5771 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5773 size = ARM2THUMB_STATIC_GLUE_SIZE;
5776 globals->arm_glue_size += size;
5781 /* Allocate space for ARMv4 BX veneers. */
5784 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5787 struct elf32_arm_link_hash_table *globals;
5789 struct elf_link_hash_entry *myh;
5790 struct bfd_link_hash_entry *bh;
5793 /* BX PC does not need a veneer. */
5797 globals = elf32_arm_hash_table (link_info);
5798 BFD_ASSERT (globals != NULL);
5799 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5801 /* Check if this veneer has already been allocated. */
5802 if (globals->bx_glue_offset[reg])
5805 s = bfd_get_linker_section
5806 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5808 BFD_ASSERT (s != NULL);
5810 /* Add symbol for veneer. */
5812 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5814 BFD_ASSERT (tmp_name);
5816 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5818 myh = elf_link_hash_lookup
5819 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5821 BFD_ASSERT (myh == NULL);
5824 val = globals->bx_glue_size;
5825 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5826 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5827 NULL, TRUE, FALSE, &bh);
5829 myh = (struct elf_link_hash_entry *) bh;
5830 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5831 myh->forced_local = 1;
5833 s->size += ARM_BX_VENEER_SIZE;
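/* Record the current glue size as this register's veneer offset, with bit 1
   set so that a value of zero still means "no veneer allocated yet" for the
   check above; the real offset is recovered later by masking off the low
   bits.  */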
5834 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5835 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5839 /* Add an entry to the code/data map for section SEC. */
5842 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5844 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5845 unsigned int newidx;
5847 if (sec_data->map == NULL)
5849 sec_data->map = (elf32_arm_section_map *)
5850 bfd_malloc (sizeof (elf32_arm_section_map));
5851 sec_data->mapcount = 0;
5852 sec_data->mapsize = 1;
5855 newidx = sec_data->mapcount++;
5857 if (sec_data->mapcount > sec_data->mapsize)
5859 sec_data->mapsize *= 2;
5860 sec_data->map = (elf32_arm_section_map *)
5861 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5862 * sizeof (elf32_arm_section_map));
5867 sec_data->map[newidx].vma = vma;
5868 sec_data->map[newidx].type = type;
5873 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5874 veneers are handled for now. */
5877 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5878 elf32_vfp11_erratum_list *branch,
5880 asection *branch_sec,
5881 unsigned int offset)
5884 struct elf32_arm_link_hash_table *hash_table;
5886 struct elf_link_hash_entry *myh;
5887 struct bfd_link_hash_entry *bh;
5889 struct _arm_elf_section_data *sec_data;
5890 elf32_vfp11_erratum_list *newerr;
5892 hash_table = elf32_arm_hash_table (link_info);
5893 BFD_ASSERT (hash_table != NULL);
5894 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5896 s = bfd_get_linker_section
5897 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5899 sec_data = elf32_arm_section_data (s);
5901 BFD_ASSERT (s != NULL);
5903 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5904 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5906 BFD_ASSERT (tmp_name);
5908 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5909 hash_table->num_vfp11_fixes);
5911 myh = elf_link_hash_lookup
5912 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5914 BFD_ASSERT (myh == NULL);
5917 val = hash_table->vfp11_erratum_glue_size;
5918 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5919 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5920 NULL, TRUE, FALSE, &bh);
5922 myh = (struct elf_link_hash_entry *) bh;
5923 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5924 myh->forced_local = 1;
5926 /* Link veneer back to calling location. */
5927 sec_data->erratumcount += 1;
5928 newerr = (elf32_vfp11_erratum_list *)
5929 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5931 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5933 newerr->u.v.branch = branch;
5934 newerr->u.v.id = hash_table->num_vfp11_fixes;
5935 branch->u.b.veneer = newerr;
5937 newerr->next = sec_data->erratumlist;
5938 sec_data->erratumlist = newerr;
5940 /* A symbol for the return from the veneer. */
5941 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5942 hash_table->num_vfp11_fixes);
5944 myh = elf_link_hash_lookup
5945 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5952 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5953 branch_sec, val, NULL, TRUE, FALSE, &bh);
5955 myh = (struct elf_link_hash_entry *) bh;
5956 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5957 myh->forced_local = 1;
5961 /* Generate a mapping symbol for the veneer section, and explicitly add an
5962 entry for that symbol to the code/data map for the section. */
5963 if (hash_table->vfp11_erratum_glue_size == 0)
5966 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5967 ever requires this erratum fix. */
5968 _bfd_generic_link_add_one_symbol (link_info,
5969 hash_table->bfd_of_glue_owner, "$a",
5970 BSF_LOCAL, s, 0, NULL,
5973 myh = (struct elf_link_hash_entry *) bh;
5974 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5975 myh->forced_local = 1;
5977 /* The elf32_arm_init_maps function only cares about symbols from input
5978 BFDs. We must make a note of this generated mapping symbol
5979 ourselves so that code byteswapping works properly in
5980 elf32_arm_write_section. */
5981 elf32_arm_section_map_add (s, 'a', 0);
5984 s->size += VFP11_ERRATUM_VENEER_SIZE;
5985 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5986 hash_table->num_vfp11_fixes++;
5988 /* The offset of the veneer. */
5992 #define ARM_GLUE_SECTION_FLAGS \
5993 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5994 | SEC_READONLY | SEC_LINKER_CREATED)
5996 /* Create a fake section for use by the ARM backend of the linker. */
5999 arm_make_glue_section (bfd * abfd, const char * name)
6003 sec = bfd_get_linker_section (abfd, name);
6008 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6011 || !bfd_set_section_alignment (abfd, sec, 2))
6014 /* Set the gc mark to prevent the section from being removed by garbage
6015 collection, despite the fact that no relocs refer to this section. */
6021 /* Add the glue sections to ABFD. This function is called from the
6022 linker scripts in ld/emultempl/{armelf}.em. */
6025 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6026 struct bfd_link_info *info)
6028 /* If we are only performing a partial
6029 link do not bother adding the glue. */
6030 if (info->relocatable)
6033 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6034 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6035 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6036 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6039 /* Select a BFD to be used to hold the sections used by the glue code.
6040 This function is called from the linker scripts in ld/emultempl/{armelf}.em. */
6044 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6046 struct elf32_arm_link_hash_table *globals;
6048 /* If we are only performing a partial link
6049 do not bother getting a bfd to hold the glue. */
6050 if (info->relocatable)
6053 /* Make sure we don't attach the glue sections to a dynamic object. */
6054 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6056 globals = elf32_arm_hash_table (info);
6057 BFD_ASSERT (globals != NULL);
6059 if (globals->bfd_of_glue_owner != NULL)
6062 /* Save the bfd for later use. */
6063 globals->bfd_of_glue_owner = abfd;
6069 check_use_blx (struct elf32_arm_link_hash_table *globals)
6073 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
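/* BLX is only usable from ARMv5T onwards.  When the ARM1176 BLX
   (interworking) erratum workaround is enabled, restrict its use further to
   v6T2 and to architectures newer than v6K, which are not affected.  */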
6076 if (globals->fix_arm1176)
6078 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6079 globals->use_blx = 1;
6083 if (cpu_arch > TAG_CPU_ARCH_V4T)
6084 globals->use_blx = 1;
6089 bfd_elf32_arm_process_before_allocation (bfd *abfd,
6090 struct bfd_link_info *link_info)
6092 Elf_Internal_Shdr *symtab_hdr;
6093 Elf_Internal_Rela *internal_relocs = NULL;
6094 Elf_Internal_Rela *irel, *irelend;
6095 bfd_byte *contents = NULL;
6098 struct elf32_arm_link_hash_table *globals;
6100 /* If we are only performing a partial link do not bother
6101 to construct any glue. */
6102 if (link_info->relocatable)
6105 /* Here we have a bfd that is to be included on the link. We have a
6106 hook to do reloc rummaging, before section sizes are nailed down. */
6107 globals = elf32_arm_hash_table (link_info);
6108 BFD_ASSERT (globals != NULL);
6110 check_use_blx (globals);
6112 if (globals->byteswap_code && !bfd_big_endian (abfd))
6114 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
6119 /* PR 5398: If we have not decided to include any loadable sections in
6120 the output then we will not have a glue owner bfd. This is OK, it
6121 just means that there is nothing else for us to do here. */
6122 if (globals->bfd_of_glue_owner == NULL)
6125 /* Rummage around all the relocs and map the glue vectors. */
6126 sec = abfd->sections;
6131 for (; sec != NULL; sec = sec->next)
6133 if (sec->reloc_count == 0)
6136 if ((sec->flags & SEC_EXCLUDE) != 0)
6139 symtab_hdr = & elf_symtab_hdr (abfd);
6141 /* Load the relocs. */
6143 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
6145 if (internal_relocs == NULL)
6148 irelend = internal_relocs + sec->reloc_count;
6149 for (irel = internal_relocs; irel < irelend; irel++)
6152 unsigned long r_index;
6154 struct elf_link_hash_entry *h;
6156 r_type = ELF32_R_TYPE (irel->r_info);
6157 r_index = ELF32_R_SYM (irel->r_info);
6159 /* These are the only relocation types we care about. */
6160 if ( r_type != R_ARM_PC24
6161 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
6164 /* Get the section contents if we haven't done so already. */
6165 if (contents == NULL)
6167 /* Get cached copy if it exists. */
6168 if (elf_section_data (sec)->this_hdr.contents != NULL)
6169 contents = elf_section_data (sec)->this_hdr.contents;
6172 /* Go get them off disk. */
6173 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6178 if (r_type == R_ARM_V4BX)
6182 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
6183 record_arm_bx_glue (link_info, reg);
6187 /* If the relocation is not against a symbol it cannot concern us. */
6190 /* We don't care about local symbols. */
6191 if (r_index < symtab_hdr->sh_info)
6194 /* This is an external symbol. */
6195 r_index -= symtab_hdr->sh_info;
6196 h = (struct elf_link_hash_entry *)
6197 elf_sym_hashes (abfd)[r_index];
6199 /* If the relocation is against a static symbol it must be within
6200 the current section and so cannot be a cross ARM/Thumb relocation. */
6204 /* If the call will go through a PLT entry then we do not need glue. */
6206 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
6212 /* This one is a call from arm code. We need to look up
6213 the target of the call. If it is a thumb target, we insert glue. */
6215 if (h->target_internal == ST_BRANCH_TO_THUMB)
6216 record_arm_to_thumb_glue (link_info, h);
6224 if (contents != NULL
6225 && elf_section_data (sec)->this_hdr.contents != contents)
6229 if (internal_relocs != NULL
6230 && elf_section_data (sec)->relocs != internal_relocs)
6231 free (internal_relocs);
6232 internal_relocs = NULL;
6238 if (contents != NULL
6239 && elf_section_data (sec)->this_hdr.contents != contents)
6241 if (internal_relocs != NULL
6242 && elf_section_data (sec)->relocs != internal_relocs)
6243 free (internal_relocs);
6250 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6253 bfd_elf32_arm_init_maps (bfd *abfd)
6255 Elf_Internal_Sym *isymbuf;
6256 Elf_Internal_Shdr *hdr;
6257 unsigned int i, localsyms;
6259 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6260 if (! is_arm_elf (abfd))
6263 if ((abfd->flags & DYNAMIC) != 0)
6266 hdr = & elf_symtab_hdr (abfd);
6267 localsyms = hdr->sh_info;
6269 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6270 should contain the number of local symbols, which should come before any
6271 global symbols. Mapping symbols are always local. */
6272 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6275 /* No internal symbols read? Skip this BFD. */
6276 if (isymbuf == NULL)
6279 for (i = 0; i < localsyms; i++)
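/* Mapping symbols ($a, $t, $d) are local symbols marking the start of ARM
   code, Thumb code and data within a section; name[1] below is that single
   class character, recorded in the section's code/data map.  */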
6281 Elf_Internal_Sym *isym = &isymbuf[i];
6282 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6286 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6288 name = bfd_elf_string_from_elf_section (abfd,
6289 hdr->sh_link, isym->st_name);
6291 if (bfd_is_arm_special_symbol_name (name,
6292 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6293 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6299 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6300 say what they wanted. */
6303 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6305 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6306 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6308 if (globals == NULL)
6311 if (globals->fix_cortex_a8 == -1)
6313 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6314 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6315 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6316 || out_attr[Tag_CPU_arch_profile].i == 0))
6317 globals->fix_cortex_a8 = 1;
6319 globals->fix_cortex_a8 = 0;
6325 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6327 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6328 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6330 if (globals == NULL)
6332 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6333 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6335 switch (globals->vfp11_fix)
6337 case BFD_ARM_VFP11_FIX_DEFAULT:
6338 case BFD_ARM_VFP11_FIX_NONE:
6339 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6343 /* Give a warning, but do as the user requests anyway. */
6344 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6345 "workaround is not necessary for target architecture"), obfd);
6348 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6349 /* For earlier architectures, we might need the workaround, but do not
6350 enable it by default. If the user is running with broken hardware, they
6351 must enable the erratum fix explicitly. */
6352 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6356 enum bfd_arm_vfp11_pipe
6364 /* Return a VFP register number. This is encoded as RX:X for single-precision
6365 registers, or X:RX for double-precision registers, where RX is the group of
6366 four bits in the instruction encoding and X is the single extension bit.
6367 RX and X fields are specified using their lowest (starting) bit. The return value is:
6370 0...31: single-precision registers s0...s31
6371 32...63: double-precision registers d0...d31.
6373 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6374 encounter VFP3 instructions, so we allow the full range for DP registers. */
6377 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
6381 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
6383 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
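/* Illustrative example (editorial addition, not in the original source):
   for the Fd operand the callers below pass RX = 12 and X = 22.  With
   IS_DOUBLE set and an instruction whose bits [15:12] are 0x3 and whose
   bit 22 is 1, the expression above yields (3 | (1 << 4)) + 32 = 51,
   i.e. d19.  With IS_DOUBLE clear the same fields give (3 << 1) | 1 = 7,
   i.e. s7.  */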
6386 /* Set bits in *WMASK according to a register number REG as encoded by
6387 bfd_arm_vfp11_regno(). Ignore d16-d31. */
6390 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
6395 *wmask |= 3 << ((reg - 32) * 2);
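/* Illustrative example (editorial addition): a double-precision register is
   recorded as its two overlapping single-precision halves.  For REG == 37
   (d5, as returned by bfd_arm_vfp11_regno) the statement above computes
   3 << ((37 - 32) * 2) == 3 << 10, i.e. it sets the bits for s10 and s11
   in *WMASK.  */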
6398 /* Return TRUE if WMASK overwrites anything in REGS. */
6401 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
6405 for (i = 0; i < numregs; i++)
6407 unsigned int reg = regs[i];
6409 if (reg < 32 && (wmask & (1 << reg)) != 0)
6417 if ((wmask & (3 << (reg * 2))) != 0)
6424 /* In this function, we're interested in two things: finding input registers
6425 for VFP data-processing instructions, and finding the set of registers which
6426 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
6427 hold the written set, so FLDM etc. are easy to deal with (we're only
6428 interested in 32 SP registers or 16 dp registers, due to the VFP version
6429 implemented by the chip in question). DP registers are marked by setting
6430 both SP registers in the write mask. */
6432 static enum bfd_arm_vfp11_pipe
6433 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
6436 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
6437 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
6439 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
6442 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6443 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6445 pqrs = ((insn & 0x00800000) >> 20)
6446 | ((insn & 0x00300000) >> 19)
6447 | ((insn & 0x00000040) >> 6);
6451 case 0: /* fmac[sd]. */
6452 case 1: /* fnmac[sd]. */
6453 case 2: /* fmsc[sd]. */
6454 case 3: /* fnmsc[sd]. */
6456 bfd_arm_vfp11_write_mask (destmask, fd);
6458 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6463 case 4: /* fmul[sd]. */
6464 case 5: /* fnmul[sd]. */
6465 case 6: /* fadd[sd]. */
6466 case 7: /* fsub[sd]. */
6470 case 8: /* fdiv[sd]. */
6473 bfd_arm_vfp11_write_mask (destmask, fd);
6474 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6479 case 15: /* extended opcode. */
6481 unsigned int extn = ((insn >> 15) & 0x1e)
6482 | ((insn >> 7) & 1);
6486 case 0: /* fcpy[sd]. */
6487 case 1: /* fabs[sd]. */
6488 case 2: /* fneg[sd]. */
6489 case 8: /* fcmp[sd]. */
6490 case 9: /* fcmpe[sd]. */
6491 case 10: /* fcmpz[sd]. */
6492 case 11: /* fcmpez[sd]. */
6493 case 16: /* fuito[sd]. */
6494 case 17: /* fsito[sd]. */
6495 case 24: /* ftoui[sd]. */
6496 case 25: /* ftouiz[sd]. */
6497 case 26: /* ftosi[sd]. */
6498 case 27: /* ftosiz[sd]. */
6499 /* These instructions will not bounce due to underflow. */
6504 case 3: /* fsqrt[sd]. */
6505 /* fsqrt cannot underflow, but it can (perhaps) overwrite
6506 registers to cause the erratum in previous instructions. */
6507 bfd_arm_vfp11_write_mask (destmask, fd);
6511 case 15: /* fcvt{ds,sd}. */
6515 bfd_arm_vfp11_write_mask (destmask, fd);
6517 /* Only FCVTSD can underflow. */
6518 if ((insn & 0x100) != 0)
6537 /* Two-register transfer. */
6538 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
6540 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6542 if ((insn & 0x100000) == 0)
6545 bfd_arm_vfp11_write_mask (destmask, fm);
6548 bfd_arm_vfp11_write_mask (destmask, fm);
6549 bfd_arm_vfp11_write_mask (destmask, fm + 1);
6555 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
6557 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6558 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
6562 case 0: /* Two-reg transfer. We should catch these above. */
6565 case 2: /* fldm[sdx]. */
6569 unsigned int i, offset = insn & 0xff;
6574 for (i = fd; i < fd + offset; i++)
6575 bfd_arm_vfp11_write_mask (destmask, i);
6579 case 4: /* fld[sd]. */
6581 bfd_arm_vfp11_write_mask (destmask, fd);
6590 /* Single-register transfer. Note L==0. */
6591 else if ((insn & 0x0f100e10) == 0x0e000a10)
6593 unsigned int opcode = (insn >> 21) & 7;
6594 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
6598 case 0: /* fmsr/fmdlr. */
6599 case 1: /* fmdhr. */
6600 /* Mark fmdhr and fmdlr as writing to the whole of the DP
6601 destination register. I don't know if this is exactly right,
6602 but it is the conservative choice. */
6603 bfd_arm_vfp11_write_mask (destmask, fn);
6617 static int elf32_arm_compare_mapping (const void * a, const void * b);
6620 /* Look for potentially-troublesome code sequences which might trigger the
6621 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
6622 (available from ARM) for details of the erratum. A short version is
6623 described in ld.texinfo. */
6626 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
6629 bfd_byte *contents = NULL;
6631 int regs[3], numregs = 0;
6632 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6633 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6635 if (globals == NULL)
6638 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6639 The states transition as follows:
6641 0 -> 1 (vector) or 0 -> 2 (scalar)
6642 A VFP FMAC-pipeline instruction has been seen. Fill
6643 regs[0]..regs[numregs-1] with its input operands. Remember this
6644 instruction in 'first_fmac'.
6647 Any instruction, except for a VFP instruction which overwrites
6652 A VFP instruction has been seen which overwrites any of regs[*].
6653 We must make a veneer! Reset state to 0 before examining next
6657 If we fail to match anything in state 2, reset to state 0 and reset
6658 the instruction pointer to the instruction after 'first_fmac'.
6660 If the VFP11 vector mode is in use, there must be at least two unrelated
6661 instructions between anti-dependent VFP11 instructions to properly avoid
6662 triggering the erratum, hence the use of the extra state 1. */
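/* Illustrative sequence (editorial addition): in scalar mode a pair such as

	fmacs	s4, s1, s2	@ FMAC-pipeline insn; regs[] holds its operands
	flds	s1, [r0]	@ the load overwrites input s1

   drives the FSM from state 0 to 2 and then to 3, so an erratum veneer is
   recorded.  Had the load written an unrelated register such as s6, the
   scan would eventually have fallen back to state 0.  */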
6664 /* If we are only performing a partial link do not bother
6665 to construct any glue. */
6666 if (link_info->relocatable)
6669 /* Skip if this bfd does not correspond to an ELF image. */
6670 if (! is_arm_elf (abfd))
6673 /* We should have chosen a fix type by the time we get here. */
6674 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6676 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6679 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6680 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6683 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6685 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6686 struct _arm_elf_section_data *sec_data;
6688 /* If we don't have executable progbits, we're not interested in this
6689 section. Also skip if section is to be excluded. */
6690 if (elf_section_type (sec) != SHT_PROGBITS
6691 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6692 || (sec->flags & SEC_EXCLUDE) != 0
6693 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
6694 || sec->output_section == bfd_abs_section_ptr
6695 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6698 sec_data = elf32_arm_section_data (sec);
6700 if (sec_data->mapcount == 0)
6703 if (elf_section_data (sec)->this_hdr.contents != NULL)
6704 contents = elf_section_data (sec)->this_hdr.contents;
6705 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6708 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6709 elf32_arm_compare_mapping);
6711 for (span = 0; span < sec_data->mapcount; span++)
6713 unsigned int span_start = sec_data->map[span].vma;
6714 unsigned int span_end = (span == sec_data->mapcount - 1)
6715 ? sec->size : sec_data->map[span + 1].vma;
6716 char span_type = sec_data->map[span].type;
6718 /* FIXME: Only ARM mode is supported at present. We may need to
6719 support Thumb-2 mode also at some point. */
6720 if (span_type != 'a')
6723 for (i = span_start; i < span_end;)
6725 unsigned int next_i = i + 4;
6726 unsigned int insn = bfd_big_endian (abfd)
6727 ? (contents[i] << 24)
6728 | (contents[i + 1] << 16)
6729 | (contents[i + 2] << 8)
6731 : (contents[i + 3] << 24)
6732 | (contents[i + 2] << 16)
6733 | (contents[i + 1] << 8)
6735 unsigned int writemask = 0;
6736 enum bfd_arm_vfp11_pipe vpipe;
6741 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6743 /* I'm assuming the VFP11 erratum can trigger with denorm
6744 operands on either the FMAC or the DS pipeline. This might
6745 lead to slightly overenthusiastic veneer insertion. */
6746 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6748 state = use_vector ? 1 : 2;
6750 veneer_of_insn = insn;
6756 int other_regs[3], other_numregs;
6757 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6760 if (vpipe != VFP11_BAD
6761 && bfd_arm_vfp11_antidependency (writemask, regs,
6771 int other_regs[3], other_numregs;
6772 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6775 if (vpipe != VFP11_BAD
6776 && bfd_arm_vfp11_antidependency (writemask, regs,
6782 next_i = first_fmac + 4;
6788 abort (); /* Should be unreachable. */
6793 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
6794 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6796 elf32_arm_section_data (sec)->erratumcount += 1;
6798 newerr->u.b.vfp_insn = veneer_of_insn;
6803 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6810 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6815 newerr->next = sec_data->erratumlist;
6816 sec_data->erratumlist = newerr;
6825 if (contents != NULL
6826 && elf_section_data (sec)->this_hdr.contents != contents)
6834 if (contents != NULL
6835 && elf_section_data (sec)->this_hdr.contents != contents)
6841 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6842 after sections have been laid out, using specially-named symbols. */
6845 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6846 struct bfd_link_info *link_info)
6849 struct elf32_arm_link_hash_table *globals;
6852 if (link_info->relocatable)
6855 /* Skip if this bfd does not correspond to an ELF image. */
6856 if (! is_arm_elf (abfd))
6859 globals = elf32_arm_hash_table (link_info);
6860 if (globals == NULL)
6863 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6864 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6866 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6868 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6869 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6871 for (; errnode != NULL; errnode = errnode->next)
6873 struct elf_link_hash_entry *myh;
6876 switch (errnode->type)
6878 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6879 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6880 /* Find veneer symbol. */
6881 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6882 errnode->u.b.veneer->u.v.id);
6884 myh = elf_link_hash_lookup
6885 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6888 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6889 "`%s'"), abfd, tmp_name);
6891 vma = myh->root.u.def.section->output_section->vma
6892 + myh->root.u.def.section->output_offset
6893 + myh->root.u.def.value;
6895 errnode->u.b.veneer->vma = vma;
6898 case VFP11_ERRATUM_ARM_VENEER:
6899 case VFP11_ERRATUM_THUMB_VENEER:
6900 /* Find return location. */
6901 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6904 myh = elf_link_hash_lookup
6905 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6908 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6909 "`%s'"), abfd, tmp_name);
6911 vma = myh->root.u.def.section->output_section->vma
6912 + myh->root.u.def.section->output_offset
6913 + myh->root.u.def.value;
6915 errnode->u.v.branch->vma = vma;
6928 /* Set target relocation values needed during linking. */
6931 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6932 struct bfd_link_info *link_info,
6934 char * target2_type,
6937 bfd_arm_vfp11_fix vfp11_fix,
6938 int no_enum_warn, int no_wchar_warn,
6939 int pic_veneer, int fix_cortex_a8,
6942 struct elf32_arm_link_hash_table *globals;
6944 globals = elf32_arm_hash_table (link_info);
6945 if (globals == NULL)
6948 globals->target1_is_rel = target1_is_rel;
6949 if (strcmp (target2_type, "rel") == 0)
6950 globals->target2_reloc = R_ARM_REL32;
6951 else if (strcmp (target2_type, "abs") == 0)
6952 globals->target2_reloc = R_ARM_ABS32;
6953 else if (strcmp (target2_type, "got-rel") == 0)
6954 globals->target2_reloc = R_ARM_GOT_PREL;
6957 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6960 globals->fix_v4bx = fix_v4bx;
6961 globals->use_blx |= use_blx;
6962 globals->vfp11_fix = vfp11_fix;
6963 globals->pic_veneer = pic_veneer;
6964 globals->fix_cortex_a8 = fix_cortex_a8;
6965 globals->fix_arm1176 = fix_arm1176;
6967 BFD_ASSERT (is_arm_elf (output_bfd));
6968 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6969 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6972 /* Replace the target offset of a Thumb bl or b.w instruction. */
6975 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6981 BFD_ASSERT ((offset & 1) == 0);
6983 upper = bfd_get_16 (abfd, insn);
6984 lower = bfd_get_16 (abfd, insn + 2);
6985 reloc_sign = (offset < 0) ? 1 : 0;
6986 upper = (upper & ~(bfd_vma) 0x7ff)
6987 | ((offset >> 12) & 0x3ff)
6988 | (reloc_sign << 10);
6989 lower = (lower & ~(bfd_vma) 0x2fff)
6990 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6991 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6992 | ((offset >> 1) & 0x7ff);
6993 bfd_put_16 (abfd, upper, insn);
6994 bfd_put_16 (abfd, lower, insn + 2);
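/* Illustrative example (editorial addition): the code above splits OFFSET
   across the two halfwords of the Thumb-2 BL/B.W encoding.  For
   OFFSET == 0x1000: reloc_sign is 0, the upper halfword receives
   imm10 == (0x1000 >> 12) & 0x3ff == 1 with S == 0, and the lower halfword
   receives J1 == J2 == 1 (bits 23 and 22 of OFFSET are clear) and
   imm11 == (0x1000 >> 1) & 0x7ff == 0.  Decoding with I1 = NOT(J1 XOR S)
   and I2 = NOT(J2 XOR S) recovers S:I1:I2:imm10:imm11:'0' == 0x1000.  */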
6997 /* Thumb code calling an ARM function. */
7000 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
7004 asection * input_section,
7005 bfd_byte * hit_data,
7008 bfd_signed_vma addend,
7010 char **error_message)
7014 long int ret_offset;
7015 struct elf_link_hash_entry * myh;
7016 struct elf32_arm_link_hash_table * globals;
7018 myh = find_thumb_glue (info, name, error_message);
7022 globals = elf32_arm_hash_table (info);
7023 BFD_ASSERT (globals != NULL);
7024 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7026 my_offset = myh->root.u.def.value;
7028 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7029 THUMB2ARM_GLUE_SECTION_NAME);
7031 BFD_ASSERT (s != NULL);
7032 BFD_ASSERT (s->contents != NULL);
7033 BFD_ASSERT (s->output_section != NULL);
7035 if ((my_offset & 0x01) == 0x01)
7038 && sym_sec->owner != NULL
7039 && !INTERWORK_FLAG (sym_sec->owner))
7041 (*_bfd_error_handler)
7042 (_("%B(%s): warning: interworking not enabled.\n"
7043 " first occurrence: %B: Thumb call to ARM"),
7044 sym_sec->owner, input_bfd, name);
7050 myh->root.u.def.value = my_offset;
7052 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
7053 s->contents + my_offset);
7055 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
7056 s->contents + my_offset + 2);
7059 /* Address of destination of the stub. */
7060 ((bfd_signed_vma) val)
7062 /* Offset from the start of the current section
7063 to the start of the stubs. */
7065 /* Offset of the start of this stub from the start of the stubs. */
7067 /* Address of the start of the current section. */
7068 + s->output_section->vma)
7069 /* The branch instruction is 4 bytes into the stub. */
7071 /* ARM branches work from the pc of the instruction + 8. */
7074 put_arm_insn (globals, output_bfd,
7075 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
7076 s->contents + my_offset + 4);
7079 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
7081 /* Now go back and fix up the original BL insn to point to here. */
7083 /* Address of where the stub is located. */
7084 (s->output_section->vma + s->output_offset + my_offset)
7085 /* Address of where the BL is located. */
7086 - (input_section->output_section->vma + input_section->output_offset
7088 /* Addend in the relocation. */
7090 /* Biasing for PC-relative addressing. */
7093 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
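/* Editorial note (not in the original source): assuming the usual t2a*
   encodings defined elsewhere in this file, the glue written above is

	.thumb
	bx	pc		@ t2a1_bx_pc_insn: switch to ARM state
	nop			@ t2a2_noop_insn: padding to the word boundary
	.arm
	b	<ARM target>	@ t2a3_b_insn with its branch field patched

   and insert_thumb_branch redirects the original Thumb BL to the start of
   this stub.  */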
7098 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
7100 static struct elf_link_hash_entry *
7101 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
7108 char ** error_message)
7111 long int ret_offset;
7112 struct elf_link_hash_entry * myh;
7113 struct elf32_arm_link_hash_table * globals;
7115 myh = find_arm_glue (info, name, error_message);
7119 globals = elf32_arm_hash_table (info);
7120 BFD_ASSERT (globals != NULL);
7121 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7123 my_offset = myh->root.u.def.value;
7125 if ((my_offset & 0x01) == 0x01)
7128 && sym_sec->owner != NULL
7129 && !INTERWORK_FLAG (sym_sec->owner))
7131 (*_bfd_error_handler)
7132 (_("%B(%s): warning: interworking not enabled.\n"
7133 " first occurrence: %B: arm call to thumb"),
7134 sym_sec->owner, input_bfd, name);
7138 myh->root.u.def.value = my_offset;
7140 if (info->shared || globals->root.is_relocatable_executable
7141 || globals->pic_veneer)
7143 /* For relocatable objects we can't use absolute addresses,
7144 so construct the address from a relative offset. */
7145 /* TODO: If the offset is small it's probably worth
7146 constructing the address with adds. */
7147 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
7148 s->contents + my_offset);
7149 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
7150 s->contents + my_offset + 4);
7151 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
7152 s->contents + my_offset + 8);
7153 /* Adjust the offset by 4 for the position of the add,
7154 and 8 for the pipeline offset. */
7155 ret_offset = (val - (s->output_offset
7156 + s->output_section->vma
7159 bfd_put_32 (output_bfd, ret_offset,
7160 s->contents + my_offset + 12);
7162 else if (globals->use_blx)
7164 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
7165 s->contents + my_offset);
7167 /* It's a thumb address. Add the low order bit. */
7168 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
7169 s->contents + my_offset + 4);
7173 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
7174 s->contents + my_offset);
7176 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
7177 s->contents + my_offset + 4);
7179 /* It's a thumb address. Add the low order bit. */
7180 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
7181 s->contents + my_offset + 8);
7187 BFD_ASSERT (my_offset <= globals->arm_glue_size);
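/* Editorial note (not in the original source): the three code paths above
   emit, respectively (using the a2t* encodings defined elsewhere in this
   file), a PIC veneer that loads a PC-relative offset, adds the PC and does
   "bx ip"; a BLX-era veneer that is a single PC-relative load into the PC
   followed by the Thumb address word; and the generic veneer

	ldr	ip, [pc]
	bx	ip
	.word	<Thumb target | 1>

   where the low bit of the address word makes the BX enter Thumb state.  */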
7192 /* Arm code calling a Thumb function. */
7195 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
7199 asection * input_section,
7200 bfd_byte * hit_data,
7203 bfd_signed_vma addend,
7205 char **error_message)
7207 unsigned long int tmp;
7210 long int ret_offset;
7211 struct elf_link_hash_entry * myh;
7212 struct elf32_arm_link_hash_table * globals;
7214 globals = elf32_arm_hash_table (info);
7215 BFD_ASSERT (globals != NULL);
7216 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7218 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7219 ARM2THUMB_GLUE_SECTION_NAME);
7220 BFD_ASSERT (s != NULL);
7221 BFD_ASSERT (s->contents != NULL);
7222 BFD_ASSERT (s->output_section != NULL);
7224 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
7225 sym_sec, val, s, error_message);
7229 my_offset = myh->root.u.def.value;
7230 tmp = bfd_get_32 (input_bfd, hit_data);
7231 tmp = tmp & 0xFF000000;
7233 /* ARM branches work from the pc of the instruction + 8, so subtract 8. */
7234 ret_offset = (s->output_offset
7236 + s->output_section->vma
7237 - (input_section->output_offset
7238 + input_section->output_section->vma
7242 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
7244 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
7249 /* Populate Arm stub for an exported Thumb function. */
7252 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
7254 struct bfd_link_info * info = (struct bfd_link_info *) inf;
7256 struct elf_link_hash_entry * myh;
7257 struct elf32_arm_link_hash_entry *eh;
7258 struct elf32_arm_link_hash_table * globals;
7261 char *error_message;
7263 eh = elf32_arm_hash_entry (h);
7264 /* Allocate stubs for exported Thumb functions on v4t. */
7265 if (eh->export_glue == NULL)
7268 globals = elf32_arm_hash_table (info);
7269 BFD_ASSERT (globals != NULL);
7270 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7272 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7273 ARM2THUMB_GLUE_SECTION_NAME);
7274 BFD_ASSERT (s != NULL);
7275 BFD_ASSERT (s->contents != NULL);
7276 BFD_ASSERT (s->output_section != NULL);
7278 sec = eh->export_glue->root.u.def.section;
7280 BFD_ASSERT (sec->output_section != NULL);
7282 val = eh->export_glue->root.u.def.value + sec->output_offset
7283 + sec->output_section->vma;
7285 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
7286 h->root.u.def.section->owner,
7287 globals->obfd, sec, val, s,
7293 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
7296 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
7301 struct elf32_arm_link_hash_table *globals;
7303 globals = elf32_arm_hash_table (info);
7304 BFD_ASSERT (globals != NULL);
7305 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7307 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7308 ARM_BX_GLUE_SECTION_NAME);
7309 BFD_ASSERT (s != NULL);
7310 BFD_ASSERT (s->contents != NULL);
7311 BFD_ASSERT (s->output_section != NULL);
7313 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
7315 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
7317 if ((globals->bx_glue_offset[reg] & 1) == 0)
7319 p = s->contents + glue_addr;
7320 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
7321 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
7322 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
7323 globals->bx_glue_offset[reg] |= 1;
7326 return glue_addr + s->output_section->vma + s->output_offset;
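/* Editorial note (not in the original source): assuming the armbx*
   encodings defined elsewhere in this file, the veneer emitted above for
   register rN is

	tst	rN, #1
	moveq	pc, rN		@ ARM-state destination: branch without BX
	bx	rN		@ Thumb-state destination, on CPUs that have BX

   and BX instructions marked with R_ARM_V4BX relocations are redirected
   here when the veneered form of the v4bx fix is in use.  */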
7329 /* Generate Arm stubs for exported Thumb symbols. */
7331 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
7332 struct bfd_link_info *link_info)
7334 struct elf32_arm_link_hash_table * globals;
7336 if (link_info == NULL)
7337 /* Ignore this if we are not called by the ELF backend linker. */
7340 globals = elf32_arm_hash_table (link_info);
7341 if (globals == NULL)
7344 /* If blx is available then exported Thumb symbols are OK and there is
7346 if (globals->use_blx)
7349 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
7353 /* Reserve space for COUNT dynamic relocations in relocation section
7357 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
7358 bfd_size_type count)
7360 struct elf32_arm_link_hash_table *htab;
7362 htab = elf32_arm_hash_table (info);
7363 BFD_ASSERT (htab->root.dynamic_sections_created);
7366 sreloc->size += RELOC_SIZE (htab) * count;
7369 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
7370 dynamic, the relocations should go in SRELOC, otherwise they should
7371 go in the special .rel.iplt section. */
7374 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
7375 bfd_size_type count)
7377 struct elf32_arm_link_hash_table *htab;
7379 htab = elf32_arm_hash_table (info);
7380 if (!htab->root.dynamic_sections_created)
7381 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
7384 BFD_ASSERT (sreloc != NULL);
7385 sreloc->size += RELOC_SIZE (htab) * count;
7389 /* Add relocation REL to the end of relocation section SRELOC. */
7392 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
7393 asection *sreloc, Elf_Internal_Rela *rel)
7396 struct elf32_arm_link_hash_table *htab;
7398 htab = elf32_arm_hash_table (info);
7399 if (!htab->root.dynamic_sections_created
7400 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
7401 sreloc = htab->root.irelplt;
7404 loc = sreloc->contents;
7405 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
7406 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
7408 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
7411 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
7412 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
7416 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
7417 bfd_boolean is_iplt_entry,
7418 union gotplt_union *root_plt,
7419 struct arm_plt_info *arm_plt)
7421 struct elf32_arm_link_hash_table *htab;
7425 htab = elf32_arm_hash_table (info);
7429 splt = htab->root.iplt;
7430 sgotplt = htab->root.igotplt;
7432 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
7433 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
7437 splt = htab->root.splt;
7438 sgotplt = htab->root.sgotplt;
7440 /* Allocate room for an R_ARM_JUMP_SLOT relocation in .rel.plt. */
7441 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
7443 /* If this is the first .plt entry, make room for the special
7445 if (splt->size == 0)
7446 splt->size += htab->plt_header_size;
7449 /* Allocate the PLT entry itself, including any leading Thumb stub. */
7450 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7451 splt->size += PLT_THUMB_STUB_SIZE;
7452 root_plt->offset = splt->size;
7453 splt->size += htab->plt_entry_size;
7455 if (!htab->symbian_p)
7457 /* We also need to make an entry in the .got.plt section, which
7458 will be placed in the .got section by the linker script. */
7459 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
7465 arm_movw_immediate (bfd_vma value)
7467 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
7471 arm_movt_immediate (bfd_vma value)
7473 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
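/* Illustrative example (editorial addition): these helpers splice a 16-bit
   value into the imm4:imm12 fields of MOVW/MOVT.  For the displacement
   0x12345678, arm_movw_immediate (0x12345678) returns 0x50678 (imm4 = 0x5,
   imm12 = 0x678, i.e. the low halfword 0x5678) and
   arm_movt_immediate (0x12345678) returns 0x10234 (the high halfword
   0x1234), ready to be ORed into the NaCl PLT entry templates below.  */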
7476 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
7477 the entry lives in .iplt and resolves to (*SYM_VALUE)().
7478 Otherwise, DYNINDX is the index of the symbol in the dynamic
7479 symbol table and SYM_VALUE is undefined.
7481 ROOT_PLT points to the offset of the PLT entry from the start of its
7482 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
7483 bookkeeping information. */
7486 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
7487 union gotplt_union *root_plt,
7488 struct arm_plt_info *arm_plt,
7489 int dynindx, bfd_vma sym_value)
7491 struct elf32_arm_link_hash_table *htab;
7497 Elf_Internal_Rela rel;
7498 bfd_vma plt_header_size;
7499 bfd_vma got_header_size;
7501 htab = elf32_arm_hash_table (info);
7503 /* Pick the appropriate sections and sizes. */
7506 splt = htab->root.iplt;
7507 sgot = htab->root.igotplt;
7508 srel = htab->root.irelplt;
7510 /* There are no reserved entries in .igot.plt, and no special
7511 first entry in .iplt. */
7512 got_header_size = 0;
7513 plt_header_size = 0;
7517 splt = htab->root.splt;
7518 sgot = htab->root.sgotplt;
7519 srel = htab->root.srelplt;
7521 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
7522 plt_header_size = htab->plt_header_size;
7524 BFD_ASSERT (splt != NULL && srel != NULL);
7526 /* Fill in the entry in the procedure linkage table. */
7527 if (htab->symbian_p)
7529 BFD_ASSERT (dynindx >= 0);
7530 put_arm_insn (htab, output_bfd,
7531 elf32_arm_symbian_plt_entry[0],
7532 splt->contents + root_plt->offset);
7533 bfd_put_32 (output_bfd,
7534 elf32_arm_symbian_plt_entry[1],
7535 splt->contents + root_plt->offset + 4);
7537 /* Fill in the entry in the .rel.plt section. */
7538 rel.r_offset = (splt->output_section->vma
7539 + splt->output_offset
7540 + root_plt->offset + 4);
7541 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
7543 /* Get the index in the procedure linkage table which
7544 corresponds to this symbol. This is the index of this symbol
7545 in all the symbols for which we are making plt entries. The
7546 first entry in the procedure linkage table is reserved. */
7547 plt_index = ((root_plt->offset - plt_header_size)
7548 / htab->plt_entry_size);
7552 bfd_vma got_offset, got_address, plt_address;
7553 bfd_vma got_displacement, initial_got_entry;
7556 BFD_ASSERT (sgot != NULL);
7558 /* Get the offset into the .(i)got.plt table of the entry that
7559 corresponds to this function. */
7560 got_offset = (arm_plt->got_offset & -2);
7562 /* Get the index in the procedure linkage table which
7563 corresponds to this symbol. This is the index of this symbol
7564 in all the symbols for which we are making plt entries.
7565 After the reserved .got.plt entries, all symbols appear in
7566 the same order as in .plt. */
7567 plt_index = (got_offset - got_header_size) / 4;
7569 /* Calculate the address of the GOT entry. */
7570 got_address = (sgot->output_section->vma
7571 + sgot->output_offset
7574 /* ...and the address of the PLT entry. */
7575 plt_address = (splt->output_section->vma
7576 + splt->output_offset
7577 + root_plt->offset);
7579 ptr = splt->contents + root_plt->offset;
7580 if (htab->vxworks_p && info->shared)
7585 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7587 val = elf32_arm_vxworks_shared_plt_entry[i];
7589 val |= got_address - sgot->output_section->vma;
7591 val |= plt_index * RELOC_SIZE (htab);
7592 if (i == 2 || i == 5)
7593 bfd_put_32 (output_bfd, val, ptr);
7595 put_arm_insn (htab, output_bfd, val, ptr);
7598 else if (htab->vxworks_p)
7603 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7605 val = elf32_arm_vxworks_exec_plt_entry[i];
7609 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
7611 val |= plt_index * RELOC_SIZE (htab);
7612 if (i == 2 || i == 5)
7613 bfd_put_32 (output_bfd, val, ptr);
7615 put_arm_insn (htab, output_bfd, val, ptr);
7618 loc = (htab->srelplt2->contents
7619 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
7621 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
7622 referencing the GOT for this PLT entry. */
7623 rel.r_offset = plt_address + 8;
7624 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
7625 rel.r_addend = got_offset;
7626 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7627 loc += RELOC_SIZE (htab);
7629 /* Create the R_ARM_ABS32 relocation referencing the
7630 beginning of the PLT for this GOT entry. */
7631 rel.r_offset = got_address;
7632 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
7634 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7636 else if (htab->nacl_p)
7638 /* Calculate the displacement between the PLT slot and the
7639 common tail that's part of the special initial PLT slot. */
7640 int32_t tail_displacement
7641 = ((splt->output_section->vma + splt->output_offset
7642 + ARM_NACL_PLT_TAIL_OFFSET)
7643 - (plt_address + htab->plt_entry_size + 4));
7644 BFD_ASSERT ((tail_displacement & 3) == 0);
7645 tail_displacement >>= 2;
7647 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
7648 || (-tail_displacement & 0xff000000) == 0);
7650 /* Calculate the displacement between the PLT slot and the entry
7651 in the GOT. The offset accounts for the value produced by
7652 adding to pc in the penultimate instruction of the PLT stub. */
7653 got_displacement = (got_address
7654 - (plt_address + htab->plt_entry_size));
7656 /* NaCl does not support interworking at all. */
7657 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
7659 put_arm_insn (htab, output_bfd,
7660 elf32_arm_nacl_plt_entry[0]
7661 | arm_movw_immediate (got_displacement),
7663 put_arm_insn (htab, output_bfd,
7664 elf32_arm_nacl_plt_entry[1]
7665 | arm_movt_immediate (got_displacement),
7667 put_arm_insn (htab, output_bfd,
7668 elf32_arm_nacl_plt_entry[2],
7670 put_arm_insn (htab, output_bfd,
7671 elf32_arm_nacl_plt_entry[3]
7672 | (tail_displacement & 0x00ffffff),
7677 /* Calculate the displacement between the PLT slot and the
7678 entry in the GOT. The eight-byte offset accounts for the
7679 value produced by adding to pc in the first instruction
7681 got_displacement = got_address - (plt_address + 8);
7683 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
7685 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7687 put_thumb_insn (htab, output_bfd,
7688 elf32_arm_plt_thumb_stub[0], ptr - 4);
7689 put_thumb_insn (htab, output_bfd,
7690 elf32_arm_plt_thumb_stub[1], ptr - 2);
7693 put_arm_insn (htab, output_bfd,
7694 elf32_arm_plt_entry[0]
7695 | ((got_displacement & 0x0ff00000) >> 20),
7697 put_arm_insn (htab, output_bfd,
7698 elf32_arm_plt_entry[1]
7699 | ((got_displacement & 0x000ff000) >> 12),
7701 put_arm_insn (htab, output_bfd,
7702 elf32_arm_plt_entry[2]
7703 | (got_displacement & 0x00000fff),
7705 #ifdef FOUR_WORD_PLT
7706 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
7710 /* Fill in the entry in the .rel(a).(i)plt section. */
7711 rel.r_offset = got_address;
7715 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
7716 The dynamic linker or static executable then calls SYM_VALUE
7717 to determine the correct run-time value of the .igot.plt entry. */
7718 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
7719 initial_got_entry = sym_value;
7723 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
7724 initial_got_entry = (splt->output_section->vma
7725 + splt->output_offset);
7728 /* Fill in the entry in the global offset table. */
7729 bfd_put_32 (output_bfd, initial_got_entry,
7730 sgot->contents + got_offset);
7733 loc = srel->contents + plt_index * RELOC_SIZE (htab);
7734 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
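/* Editorial note (not in the original source): for the default
   (non-VxWorks, non-NaCl, non-Symbian) case above, elf32_arm_plt_entry is
   the classic three-instruction sequence, roughly

	add	ip, pc, #G & 0x0ff00000
	add	ip, ip, #G & 0x000ff000
	ldr	pc, [ip, #G & 0x00000fff]!

   where G is got_displacement; the three masks used when patching the
   words correspond to the immediate fields of these instructions.  */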
7737 /* Some relocations map to different relocations depending on the
7738 target. Return the real relocation. */
7741 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
7747 if (globals->target1_is_rel)
7753 return globals->target2_reloc;
7760 /* Return the base VMA address which should be subtracted from real addresses
7761 when resolving @dtpoff relocation.
7762 This is PT_TLS segment p_vaddr. */
7765 dtpoff_base (struct bfd_link_info *info)
7767 /* If tls_sec is NULL, we should have signalled an error already. */
7768 if (elf_hash_table (info)->tls_sec == NULL)
7770 return elf_hash_table (info)->tls_sec->vma;
7773 /* Return the relocation value for @tpoff relocation
7774 if STT_TLS virtual address is ADDRESS. */
7777 tpoff (struct bfd_link_info *info, bfd_vma address)
7779 struct elf_link_hash_table *htab = elf_hash_table (info);
7782 /* If tls_sec is NULL, we should have signalled an error already. */
7783 if (htab->tls_sec == NULL)
7785 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
7786 return address - htab->tls_sec->vma + base;
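/* Illustrative example (editorial addition, assuming the ARM TCB_SIZE of 8
   defined earlier in this file): with a PT_TLS segment aligned to 8 bytes,
   BASE above is align_power (8, 3) == 8, so a thread-local symbol placed
   0x10 bytes into the TLS segment gets a @tpoff value of 0x18.  This is
   the usual "variant 1" layout with the 8-byte TCB ahead of the TLS
   block.  */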
7789 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
7790 VALUE is the relocation value. */
7792 static bfd_reloc_status_type
7793 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
7796 return bfd_reloc_overflow;
7798 value |= bfd_get_32 (abfd, data) & 0xfffff000;
7799 bfd_put_32 (abfd, value, data);
7800 return bfd_reloc_ok;
7803 /* Handle TLS relaxations. Relaxing is possible for symbols that use
7804 R_ARM_TLS_GOTDESC, R_ARM_{,THM_}TLS_CALL or
7805 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
7807 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
7808 is to then call final_link_relocate. Return other values in the
7811 FIXME: When --emit-relocs is in effect, we'll emit relocs describing
7812 the pre-relaxed code. It would be nice if the relocs were updated
7813 to match the optimization. */
7815 static bfd_reloc_status_type
7816 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
7817 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
7818 Elf_Internal_Rela *rel, unsigned long is_local)
7822 switch (ELF32_R_TYPE (rel->r_info))
7825 return bfd_reloc_notsupported;
7827 case R_ARM_TLS_GOTDESC:
7832 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
7834 insn -= 5; /* THUMB */
7836 insn -= 8; /* ARM */
7838 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
7839 return bfd_reloc_continue;
7841 case R_ARM_THM_TLS_DESCSEQ:
7843 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
7844 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
7848 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7850 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
7854 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7857 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
7859 else if ((insn & 0xff87) == 0x4780) /* blx rx */
7863 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7866 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
7867 contents + rel->r_offset);
7871 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
7872 /* It's a 32 bit instruction, fetch the rest of it for
7873 error generation. */
7875 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
7876 (*_bfd_error_handler)
7877 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
7878 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
7879 return bfd_reloc_notsupported;
7883 case R_ARM_TLS_DESCSEQ:
7885 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
7886 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
7890 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
7891 contents + rel->r_offset);
7893 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
7897 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
7900 bfd_put_32 (input_bfd, insn & 0xfffff000,
7901 contents + rel->r_offset);
7903 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
7907 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
7910 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
7911 contents + rel->r_offset);
7915 (*_bfd_error_handler)
7916 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
7917 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
7918 return bfd_reloc_notsupported;
7922 case R_ARM_TLS_CALL:
7923 /* GD->IE relaxation, turn the instruction into 'nop' or
7924 'ldr r0, [pc,r0]' */
7925 insn = is_local ? 0xe1a00000 : 0xe79f0000;
7926 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
7929 case R_ARM_THM_TLS_CALL:
7930 /* GD->IE relaxation */
7932 /* add r0,pc; ldr r0, [r0] */
7934 else if (arch_has_thumb2_nop (globals))
7941 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
7942 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
7945 return bfd_reloc_ok;
7948 /* For a given value of n, calculate the value of G_n as required to
7949 deal with group relocations. We return it in the form of an
7950 encoded constant-and-rotation, together with the final residual. If n is
7951 specified as less than zero, then final_residual is filled with the
7952 input value and no further action is performed. */
7955 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
7959 bfd_vma encoded_g_n = 0;
7960 bfd_vma residual = value; /* Also known as Y_n. */
7962 for (current_n = 0; current_n <= n; current_n++)
7966 /* Calculate which part of the value to mask. */
7973 /* Determine the most significant bit in the residual and
7974 align the resulting value to a 2-bit boundary. */
7975 for (msb = 30; msb >= 0; msb -= 2)
7976 if (residual & (3 << msb))
7979 /* The desired shift is now (msb - 6), or zero, whichever
7986 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
7987 g_n = residual & (0xff << shift);
7988 encoded_g_n = (g_n >> shift)
7989 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
7991 /* Calculate the residual for the next time around. */
7995 *final_residual = residual;
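/* Worked example (editorial addition): for VALUE == 0x12345678 and N == 0
   the loop above finds msb == 28 and shift == 22, so
   g_n == 0x12345678 & (0xff << 22) == 0x12000000.  The encoded form is
   (0x12000000 >> 22) | (((32 - 22) / 2) << 8) == 0x548, i.e. the ARM
   immediate 0x48 rotated right by 10, and the residual passed to the next
   round (or returned in *FINAL_RESIDUAL) is 0x00345678.  */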
8000 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
8001 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
8004 identify_add_or_sub (bfd_vma insn)
8006 int opcode = insn & 0x1e00000;
8008 if (opcode == 1 << 23) /* ADD */
8011 if (opcode == 1 << 22) /* SUB */
8017 /* Perform a relocation as part of a final link. */
8019 static bfd_reloc_status_type
8020 elf32_arm_final_link_relocate (reloc_howto_type * howto,
8023 asection * input_section,
8024 bfd_byte * contents,
8025 Elf_Internal_Rela * rel,
8027 struct bfd_link_info * info,
8029 const char * sym_name,
8030 unsigned char st_type,
8031 enum arm_st_branch_type branch_type,
8032 struct elf_link_hash_entry * h,
8033 bfd_boolean * unresolved_reloc_p,
8034 char ** error_message)
8036 unsigned long r_type = howto->type;
8037 unsigned long r_symndx;
8038 bfd_byte * hit_data = contents + rel->r_offset;
8039 bfd_vma * local_got_offsets;
8040 bfd_vma * local_tlsdesc_gotents;
8043 asection * sreloc = NULL;
8046 bfd_signed_vma signed_addend;
8047 unsigned char dynreloc_st_type;
8048 bfd_vma dynreloc_value;
8049 struct elf32_arm_link_hash_table * globals;
8050 struct elf32_arm_link_hash_entry *eh;
8051 union gotplt_union *root_plt;
8052 struct arm_plt_info *arm_plt;
8054 bfd_vma gotplt_offset;
8055 bfd_boolean has_iplt_entry;
8057 globals = elf32_arm_hash_table (info);
8058 if (globals == NULL)
8059 return bfd_reloc_notsupported;
8061 BFD_ASSERT (is_arm_elf (input_bfd));
8063 /* Some relocation types map to different relocations depending on the
8064 target. We pick the right one here. */
8065 r_type = arm_real_reloc_type (globals, r_type);
8067 /* It is possible to have linker relaxations on some TLS access
8068 models. Update our information here. */
8069 r_type = elf32_arm_tls_transition (info, r_type, h);
8071 if (r_type != howto->type)
8072 howto = elf32_arm_howto_from_type (r_type);
8074 /* If the start address has been set, then set the EF_ARM_HASENTRY
8075 flag. Setting this more than once is redundant, but the cost is
8076 not too high, and it keeps the code simple.
8078 The test is done here, rather than somewhere else, because the
8079 start address is only set just before the final link commences.
8081 Note - if the user deliberately sets a start address of 0, the
8082 flag will not be set. */
8083 if (bfd_get_start_address (output_bfd) != 0)
8084 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
8086 eh = (struct elf32_arm_link_hash_entry *) h;
8087 sgot = globals->root.sgot;
8088 local_got_offsets = elf_local_got_offsets (input_bfd);
8089 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
8091 if (globals->root.dynamic_sections_created)
8092 srelgot = globals->root.srelgot;
8096 r_symndx = ELF32_R_SYM (rel->r_info);
8098 if (globals->use_rel)
8100 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
8102 if (addend & ((howto->src_mask + 1) >> 1))
8105 signed_addend &= ~ howto->src_mask;
8106 signed_addend |= addend;
8109 signed_addend = addend;
8112 addend = signed_addend = rel->r_addend;
8114 /* Record the symbol information that should be used in dynamic
8116 dynreloc_st_type = st_type;
8117 dynreloc_value = value;
8118 if (branch_type == ST_BRANCH_TO_THUMB)
8119 dynreloc_value |= 1;
8121 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
8122 VALUE appropriately for relocations that we resolve at link time. */
8123 has_iplt_entry = FALSE;
8124 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
8125 && root_plt->offset != (bfd_vma) -1)
8127 plt_offset = root_plt->offset;
8128 gotplt_offset = arm_plt->got_offset;
8130 if (h == NULL || eh->is_iplt)
8132 has_iplt_entry = TRUE;
8133 splt = globals->root.iplt;
8135 /* Populate .iplt entries here, because not all of them will
8136 be seen by finish_dynamic_symbol. The lower bit is set if
8137 we have already populated the entry. */
8142 elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
8143 -1, dynreloc_value);
8144 root_plt->offset |= 1;
8147 /* Static relocations always resolve to the .iplt entry. */
8149 value = (splt->output_section->vma
8150 + splt->output_offset
8152 branch_type = ST_BRANCH_TO_ARM;
8154 /* If there are non-call relocations that resolve to the .iplt
8155 entry, then all dynamic ones must too. */
8156 if (arm_plt->noncall_refcount != 0)
8158 dynreloc_st_type = st_type;
8159 dynreloc_value = value;
8163 /* We populate the .plt entry in finish_dynamic_symbol. */
8164 splt = globals->root.splt;
8169 plt_offset = (bfd_vma) -1;
8170 gotplt_offset = (bfd_vma) -1;
8176 /* We don't need to find a value for this symbol. It's just a
8178 *unresolved_reloc_p = FALSE;
8179 return bfd_reloc_ok;
8182 if (!globals->vxworks_p)
8183 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8187 case R_ARM_ABS32_NOI:
8189 case R_ARM_REL32_NOI:
8195 /* Handle relocations which should use the PLT entry. ABS32/REL32
8196 will use the symbol's value, which may point to a PLT entry, but we
8197 don't need to handle that here. If we created a PLT entry, all
8198 branches in this object should go to it, except if the PLT is too
8199 far away, in which case a long branch stub should be inserted. */
8200 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
8201 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
8202 && r_type != R_ARM_CALL
8203 && r_type != R_ARM_JUMP24
8204 && r_type != R_ARM_PLT32)
8205 && plt_offset != (bfd_vma) -1)
8207 /* If we've created a .plt section, and assigned a PLT entry
8208 to this function, it must either be a STT_GNU_IFUNC reference
8209 or not be known to bind locally. In other cases, we should
8210 have cleared the PLT entry by now. */
8211 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
8213 value = (splt->output_section->vma
8214 + splt->output_offset
8216 *unresolved_reloc_p = FALSE;
8217 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8218 contents, rel->r_offset, value,
8222 /* When generating a shared object or relocatable executable, these
8223 relocations are copied into the output file to be resolved at
8225 if ((info->shared || globals->root.is_relocatable_executable)
8226 && (input_section->flags & SEC_ALLOC)
8227 && !(globals->vxworks_p
8228 && strcmp (input_section->output_section->name,
8230 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
8231 || !SYMBOL_CALLS_LOCAL (info, h))
8232 && (!strstr (input_section->name, STUB_SUFFIX))
8234 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8235 || h->root.type != bfd_link_hash_undefweak)
8236 && r_type != R_ARM_PC24
8237 && r_type != R_ARM_CALL
8238 && r_type != R_ARM_JUMP24
8239 && r_type != R_ARM_PREL31
8240 && r_type != R_ARM_PLT32)
8242 Elf_Internal_Rela outrel;
8243 bfd_boolean skip, relocate;
8245 *unresolved_reloc_p = FALSE;
8247 if (sreloc == NULL && globals->root.dynamic_sections_created)
8249 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
8250 ! globals->use_rel);
8253 return bfd_reloc_notsupported;
8259 outrel.r_addend = addend;
8261 _bfd_elf_section_offset (output_bfd, info, input_section,
8263 if (outrel.r_offset == (bfd_vma) -1)
8265 else if (outrel.r_offset == (bfd_vma) -2)
8266 skip = TRUE, relocate = TRUE;
8267 outrel.r_offset += (input_section->output_section->vma
8268 + input_section->output_offset);
8271 memset (&outrel, 0, sizeof outrel);
8276 || !h->def_regular))
8277 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
8282 /* This symbol is local, or marked to become local. */
8283 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
8284 if (globals->symbian_p)
8288 /* On Symbian OS, the data segment and text segment
8289 can be relocated independently. Therefore, we
8290 must indicate the segment to which this
8291 relocation is relative. The BPABI allows us to
8292 use any symbol in the right segment; we just use
8293 the section symbol as it is convenient. (We
8294 cannot use the symbol given by "h" directly as it
8295 will not appear in the dynamic symbol table.)
8297 Note that the dynamic linker ignores the section
8298 symbol value, so we don't subtract osec->vma
8299 from the emitted reloc addend. */
8301 osec = sym_sec->output_section;
8303 osec = input_section->output_section;
8304 symbol = elf_section_data (osec)->dynindx;
8307 struct elf_link_hash_table *htab = elf_hash_table (info);
8309 if ((osec->flags & SEC_READONLY) == 0
8310 && htab->data_index_section != NULL)
8311 osec = htab->data_index_section;
8313 osec = htab->text_index_section;
8314 symbol = elf_section_data (osec)->dynindx;
8316 BFD_ASSERT (symbol != 0);
8319 /* On SVR4-ish systems, the dynamic loader cannot
8320 relocate the text and data segments independently,
8321 so the symbol does not matter. */
8323 if (dynreloc_st_type == STT_GNU_IFUNC)
8324 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
8325 to the .iplt entry. Instead, every non-call reference
8326 must use an R_ARM_IRELATIVE relocation to obtain the
8327 correct run-time address. */
8328 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
8330 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
8331 if (globals->use_rel)
8334 outrel.r_addend += dynreloc_value;
8337 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
8339 /* If this reloc is against an external symbol, we do not want to
8340 fiddle with the addend. Otherwise, we need to include the symbol
8341 value so that it becomes an addend for the dynamic reloc. */
8343 return bfd_reloc_ok;
8345 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8346 contents, rel->r_offset,
8347 dynreloc_value, (bfd_vma) 0);
8349 else switch (r_type)
8352 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8354 case R_ARM_XPC25: /* Arm BLX instruction. */
8357 case R_ARM_PC24: /* Arm B/BL instruction. */
8360 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
8362 if (r_type == R_ARM_XPC25)
8364 /* Check for Arm calling Arm function. */
8365 /* FIXME: Should we translate the instruction into a BL
8366 instruction instead ? */
8367 if (branch_type != ST_BRANCH_TO_THUMB)
8368 (*_bfd_error_handler)
8369 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
8371 h ? h->root.root.string : "(local)");
8373 else if (r_type == R_ARM_PC24)
8375 /* Check for Arm calling Thumb function. */
8376 if (branch_type == ST_BRANCH_TO_THUMB)
8378 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
8379 output_bfd, input_section,
8380 hit_data, sym_sec, rel->r_offset,
8381 signed_addend, value,
8383 return bfd_reloc_ok;
8385 return bfd_reloc_dangerous;
8389 /* Check if a stub has to be inserted because the
8390 destination is too far or we are changing mode. */
8391 if ( r_type == R_ARM_CALL
8392 || r_type == R_ARM_JUMP24
8393 || r_type == R_ARM_PLT32)
8395 enum elf32_arm_stub_type stub_type = arm_stub_none;
8396 struct elf32_arm_link_hash_entry *hash;
8398 hash = (struct elf32_arm_link_hash_entry *) h;
8399 stub_type = arm_type_of_stub (info, input_section, rel,
8400 st_type, &branch_type,
8401 hash, value, sym_sec,
8402 input_bfd, sym_name);
8404 if (stub_type != arm_stub_none)
8406 /* The target is out of reach, so redirect the
8407 branch to the local stub for this function. */
8408 stub_entry = elf32_arm_get_stub_entry (input_section,
8413 if (stub_entry != NULL)
8414 value = (stub_entry->stub_offset
8415 + stub_entry->stub_sec->output_offset
8416 + stub_entry->stub_sec->output_section->vma);
8418 if (plt_offset != (bfd_vma) -1)
8419 *unresolved_reloc_p = FALSE;
8424 /* If the call goes through a PLT entry, make sure to
8425 check distance to the right destination address. */
8426 if (plt_offset != (bfd_vma) -1)
8428 value = (splt->output_section->vma
8429 + splt->output_offset
8431 *unresolved_reloc_p = FALSE;
8432 /* The PLT entry is in ARM mode, regardless of the
8434 branch_type = ST_BRANCH_TO_ARM;
8439 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
8441 S is the address of the symbol in the relocation.
8442 P is address of the instruction being relocated.
8443 A is the addend (extracted from the instruction) in bytes.
8445 S is held in 'value'.
8446 P is the base address of the section containing the
8447 instruction plus the offset of the reloc into that
8449 (input_section->output_section->vma +
8450 input_section->output_offset +
8452 A is the addend, converted into bytes, ie:
8455 Note: None of these operations have knowledge of the pipeline
8456 size of the processor, thus it is up to the assembler to
8457 encode this information into the addend. */
8458 value -= (input_section->output_section->vma
8459 + input_section->output_offset);
8460 value -= rel->r_offset;
8461 if (globals->use_rel)
8462 value += (signed_addend << howto->size);
8464 /* RELA addends do not have to be adjusted by howto->size. */
8465 value += signed_addend;
8467 signed_addend = value;
8468 signed_addend >>= howto->rightshift;
8470 /* A branch to an undefined weak symbol is turned into a jump to
8471 the next instruction unless a PLT entry will be created.
8472 Do the same for local undefined symbols (but not for STN_UNDEF).
8473 The jump to the next instruction is optimized as a NOP depending
8474 on the architecture. */
8475 if (h ? (h->root.type == bfd_link_hash_undefweak
8476 && plt_offset == (bfd_vma) -1)
8477 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
8479 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
8481 if (arch_has_arm_nop (globals))
8482 value |= 0x0320f000;
8484 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
8488 /* Perform a signed range check. */
8489 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
8490 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
8491 return bfd_reloc_overflow;
8493 addend = (value & 2);
8495 value = (signed_addend & howto->dst_mask)
8496 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
8498 if (r_type == R_ARM_CALL)
8500 /* Set the H bit in the BLX instruction. */
8501 if (branch_type == ST_BRANCH_TO_THUMB)
8506 value &= ~(bfd_vma)(1 << 24);
8509 /* Select the correct instruction (BL or BLX). */
8510 /* Only if we are not handling a BL to a stub. In this
8511 case, mode switching is performed by the stub. */
8512 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
8514 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
8516 value &= ~(bfd_vma)(1 << 28);
8526 if (branch_type == ST_BRANCH_TO_THUMB)
8530 case R_ARM_ABS32_NOI:
8536 if (branch_type == ST_BRANCH_TO_THUMB)
8538 value -= (input_section->output_section->vma
8539 + input_section->output_offset + rel->r_offset);
8542 case R_ARM_REL32_NOI:
8544 value -= (input_section->output_section->vma
8545 + input_section->output_offset + rel->r_offset);
8549 value -= (input_section->output_section->vma
8550 + input_section->output_offset + rel->r_offset);
8551 value += signed_addend;
8552 if (! h || h->root.type != bfd_link_hash_undefweak)
8554 /* Check for overflow. */
8555 if ((value ^ (value >> 1)) & (1 << 30))
8556 return bfd_reloc_overflow;
8558 value &= 0x7fffffff;
8559 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
8560 if (branch_type == ST_BRANCH_TO_THUMB)
8565 bfd_put_32 (input_bfd, value, hit_data);
8566 return bfd_reloc_ok;
8571 /* There is no way to tell whether the user intended to use a signed or
8572 unsigned addend. When checking for overflow we accept either,
8573 as specified by the AAELF. */
8574 if ((long) value > 0xff || (long) value < -0x80)
8575 return bfd_reloc_overflow;
8577 bfd_put_8 (input_bfd, value, hit_data);
8578 return bfd_reloc_ok;
8583 /* See comment for R_ARM_ABS8. */
8584 if ((long) value > 0xffff || (long) value < -0x8000)
8585 return bfd_reloc_overflow;
8587 bfd_put_16 (input_bfd, value, hit_data);
8588 return bfd_reloc_ok;
8590 case R_ARM_THM_ABS5:
8591 /* Support ldr and str instructions for the thumb. */
8592 if (globals->use_rel)
8594 /* Need to refetch addend. */
8595 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
8596 /* ??? Need to determine shift amount from operand size. */
8597 addend >>= howto->rightshift;
8601 /* ??? Isn't value unsigned? */
8602 if ((long) value > 0x1f || (long) value < -0x10)
8603 return bfd_reloc_overflow;
8605 /* ??? Value needs to be properly shifted into place first. */
8606 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
8607 bfd_put_16 (input_bfd, value, hit_data);
8608 return bfd_reloc_ok;
8610 case R_ARM_THM_ALU_PREL_11_0:
8611 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
8614 bfd_signed_vma relocation;
8616 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8617 | bfd_get_16 (input_bfd, hit_data + 2);
8619 if (globals->use_rel)
8621 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
8622 | ((insn & (1 << 26)) >> 15);
8623 if (insn & 0xf00000)
8624 signed_addend = -signed_addend;
8627 relocation = value + signed_addend;
8628 relocation -= Pa (input_section->output_section->vma
8629 + input_section->output_offset
8632 value = abs (relocation);
8634 if (value >= 0x1000)
8635 return bfd_reloc_overflow;
8637 insn = (insn & 0xfb0f8f00) | (value & 0xff)
8638 | ((value & 0x700) << 4)
8639 | ((value & 0x800) << 15);
8643 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8644 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8646 return bfd_reloc_ok;
8650 /* PR 10073: This reloc is not generated by the GNU toolchain,
8651 but it is supported for compatibility with third party libraries
8652 generated by other compilers, specifically the ARM/IAR. */
8655 bfd_signed_vma relocation;
8657 insn = bfd_get_16 (input_bfd, hit_data);
8659 if (globals->use_rel)
8660 	  addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) - 4;
8662 relocation = value + addend;
8663 relocation -= Pa (input_section->output_section->vma
8664 + input_section->output_offset
8667 value = abs (relocation);
8669 /* We do not check for overflow of this reloc. Although strictly
8670 speaking this is incorrect, it appears to be necessary in order
8671 to work with IAR generated relocs. Since GCC and GAS do not
8672 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
8673 a problem for them. */
8676 insn = (insn & 0xff00) | (value >> 2);
8678 bfd_put_16 (input_bfd, insn, hit_data);
8680 return bfd_reloc_ok;
8683 case R_ARM_THM_PC12:
8684 /* Corresponds to: ldr.w reg, [pc, #offset]. */
8687 bfd_signed_vma relocation;
8689 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8690 | bfd_get_16 (input_bfd, hit_data + 2);
8692 if (globals->use_rel)
8694 signed_addend = insn & 0xfff;
8695 if (!(insn & (1 << 23)))
8696 signed_addend = -signed_addend;
8699 relocation = value + signed_addend;
8700 relocation -= Pa (input_section->output_section->vma
8701 + input_section->output_offset
8704 value = abs (relocation);
8706 if (value >= 0x1000)
8707 return bfd_reloc_overflow;
8709 insn = (insn & 0xff7ff000) | value;
8710 if (relocation >= 0)
8713 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8714 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8716 return bfd_reloc_ok;
8719 case R_ARM_THM_XPC22:
8720 case R_ARM_THM_CALL:
8721 case R_ARM_THM_JUMP24:
8722 /* Thumb BL (branch long instruction). */
8726 bfd_boolean overflow = FALSE;
8727 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8728 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8729 bfd_signed_vma reloc_signed_max;
8730 bfd_signed_vma reloc_signed_min;
8732 bfd_signed_vma signed_check;
8734 const int thumb2 = using_thumb2 (globals);
8736 /* A branch to an undefined weak symbol is turned into a jump to
8737 the next instruction unless a PLT entry will be created.
8738 The jump to the next instruction is optimized as a NOP.W for
8739 Thumb-2 enabled architectures. */
8740 if (h && h->root.type == bfd_link_hash_undefweak
8741 && plt_offset == (bfd_vma) -1)
8743 if (arch_has_thumb2_nop (globals))
8745 bfd_put_16 (input_bfd, 0xf3af, hit_data);
8746 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
8750 bfd_put_16 (input_bfd, 0xe000, hit_data);
8751 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
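	      /* 0xe000 is a Thumb B with a zero offset: the Thumb PC reads as
		 the branch address plus 4, so it lands just past this 4-byte
		 slot; 0xbf00 is a Thumb NOP.  Together the pair acts as a
		 4-byte no-op where NOP.W is unavailable.  */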
8753 return bfd_reloc_ok;
8756 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
8757 with Thumb-1) involving the J1 and J2 bits. */
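	/* The 25-bit offset is laid out as S:I1:I2:imm10:imm11:'0', where
	   I1 = NOT (J1 XOR S) and I2 = NOT (J2 XOR S); the final step below
	   sign-extends it using S as bit 24.  */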
8758 if (globals->use_rel)
8760 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
8761 bfd_vma upper = upper_insn & 0x3ff;
8762 bfd_vma lower = lower_insn & 0x7ff;
8763 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
8764 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
8765 bfd_vma i1 = j1 ^ s ? 0 : 1;
8766 bfd_vma i2 = j2 ^ s ? 0 : 1;
8768 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
8770 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
8772 signed_addend = addend;
8775 if (r_type == R_ARM_THM_XPC22)
8777 /* Check for Thumb to Thumb call. */
8778 /* FIXME: Should we translate the instruction into a BL
8779 instruction instead ? */
8780 if (branch_type == ST_BRANCH_TO_THUMB)
8781 (*_bfd_error_handler)
8782 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
8784 h ? h->root.root.string : "(local)");
8788 /* If it is not a call to Thumb, assume call to Arm.
8789 If it is a call relative to a section name, then it is not a
8790 function call at all, but rather a long jump. Calls through
8791 the PLT do not require stubs. */
8792 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
8794 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8796 /* Convert BL to BLX. */
8797 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8799 else if (( r_type != R_ARM_THM_CALL)
8800 && (r_type != R_ARM_THM_JUMP24))
8802 if (elf32_thumb_to_arm_stub
8803 (info, sym_name, input_bfd, output_bfd, input_section,
8804 hit_data, sym_sec, rel->r_offset, signed_addend, value,
8806 return bfd_reloc_ok;
8808 return bfd_reloc_dangerous;
8811 else if (branch_type == ST_BRANCH_TO_THUMB
8813 && r_type == R_ARM_THM_CALL)
8815 /* Make sure this is a BL. */
8816 lower_insn |= 0x1800;
8820 enum elf32_arm_stub_type stub_type = arm_stub_none;
8821 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
8823 /* Check if a stub has to be inserted because the destination
8825 struct elf32_arm_stub_hash_entry *stub_entry;
8826 struct elf32_arm_link_hash_entry *hash;
8828 hash = (struct elf32_arm_link_hash_entry *) h;
8830 stub_type = arm_type_of_stub (info, input_section, rel,
8831 st_type, &branch_type,
8832 hash, value, sym_sec,
8833 input_bfd, sym_name);
8835 if (stub_type != arm_stub_none)
8837 /* The target is out of reach or we are changing modes, so
8838 redirect the branch to the local stub for this
8840 stub_entry = elf32_arm_get_stub_entry (input_section,
8844 if (stub_entry != NULL)
8846 value = (stub_entry->stub_offset
8847 + stub_entry->stub_sec->output_offset
8848 + stub_entry->stub_sec->output_section->vma);
8850 if (plt_offset != (bfd_vma) -1)
8851 *unresolved_reloc_p = FALSE;
8854 /* If this call becomes a call to Arm, force BLX. */
8855 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
8858 && !arm_stub_is_thumb (stub_entry->stub_type))
8859 || branch_type != ST_BRANCH_TO_THUMB)
8860 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8865 /* Handle calls via the PLT. */
8866 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
8868 value = (splt->output_section->vma
8869 + splt->output_offset
8872 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8874 /* If the Thumb BLX instruction is available, convert
8875 the BL to a BLX instruction to call the ARM-mode
8877 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8878 branch_type = ST_BRANCH_TO_ARM;
8882 /* Target the Thumb stub before the ARM PLT entry. */
8883 value -= PLT_THUMB_STUB_SIZE;
8884 branch_type = ST_BRANCH_TO_THUMB;
8886 *unresolved_reloc_p = FALSE;
8889 relocation = value + signed_addend;
8891 relocation -= (input_section->output_section->vma
8892 + input_section->output_offset
8895 check = relocation >> howto->rightshift;
8897 /* If this is a signed value, the rightshift just dropped
8898 leading 1 bits (assuming twos complement). */
8899 if ((bfd_signed_vma) relocation >= 0)
8900 signed_check = check;
8902 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
8904 	/* Calculate the permissible maximum and minimum values for
8905 	   this relocation according to whether we're relocating for
8907 bitsize = howto->bitsize;
8910 reloc_signed_max = (1 << (bitsize - 1)) - 1;
8911 reloc_signed_min = ~reloc_signed_max;
8913 /* Assumes two's complement. */
8914 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8917 if ((lower_insn & 0x5000) == 0x4000)
8918 /* For a BLX instruction, make sure that the relocation is rounded up
8919 to a word boundary. This follows the semantics of the instruction
8920 which specifies that bit 1 of the target address will come from bit
8921 1 of the base address. */
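	/* In effect the offset is rounded to the nearest multiple of four:
	   e.g. 0x1001 becomes 0x1000 and 0x1002 becomes 0x1004.  */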
8922 relocation = (relocation + 2) & ~ 3;
8924 /* Put RELOCATION back into the insn. Assumes two's complement.
8925 We use the Thumb-2 encoding, which is safe even if dealing with
8926 a Thumb-1 instruction by virtue of our overflow check above. */
8927 reloc_sign = (signed_check < 0) ? 1 : 0;
8928 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
8929 | ((relocation >> 12) & 0x3ff)
8930 | (reloc_sign << 10);
8931 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
8932 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
8933 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
8934 | ((relocation >> 1) & 0x7ff);
8936 /* Put the relocated value back in the object file: */
8937 bfd_put_16 (input_bfd, upper_insn, hit_data);
8938 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
8940 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
8944 case R_ARM_THM_JUMP19:
8945 /* Thumb32 conditional branch instruction. */
8948 bfd_boolean overflow = FALSE;
8949 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8950 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8951 bfd_signed_vma reloc_signed_max = 0xffffe;
8952 bfd_signed_vma reloc_signed_min = -0x100000;
8953 bfd_signed_vma signed_check;
8955 /* Need to refetch the addend, reconstruct the top three bits,
8956 and squish the two 11 bit pieces together. */
8957 if (globals->use_rel)
8959 bfd_vma S = (upper_insn & 0x0400) >> 10;
8960 bfd_vma upper = (upper_insn & 0x003f);
8961 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
8962 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
8963 bfd_vma lower = (lower_insn & 0x07ff);
8968 upper -= 0x0100; /* Sign extend. */
8970 addend = (upper << 12) | (lower << 1);
8971 signed_addend = addend;
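	  /* For the Thumb-2 conditional branch the offset is laid out as
	     S:J2:J1:imm6:imm11:'0', a signed 21-bit value, which is what the
	     +/-1MB limits above reflect.  */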
8974 /* Handle calls via the PLT. */
8975 if (plt_offset != (bfd_vma) -1)
8977 value = (splt->output_section->vma
8978 + splt->output_offset
8980 /* Target the Thumb stub before the ARM PLT entry. */
8981 value -= PLT_THUMB_STUB_SIZE;
8982 *unresolved_reloc_p = FALSE;
8985 /* ??? Should handle interworking? GCC might someday try to
8986 use this for tail calls. */
8988 relocation = value + signed_addend;
8989 relocation -= (input_section->output_section->vma
8990 + input_section->output_offset
8992 signed_check = (bfd_signed_vma) relocation;
8994 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8997 /* Put RELOCATION back into the insn. */
8999 bfd_vma S = (relocation & 0x00100000) >> 20;
9000 bfd_vma J2 = (relocation & 0x00080000) >> 19;
9001 bfd_vma J1 = (relocation & 0x00040000) >> 18;
9002 bfd_vma hi = (relocation & 0x0003f000) >> 12;
9003 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
9005 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
9006 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
9009 /* Put the relocated value back in the object file: */
9010 bfd_put_16 (input_bfd, upper_insn, hit_data);
9011 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9013 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9016 case R_ARM_THM_JUMP11:
9017 case R_ARM_THM_JUMP8:
9018 case R_ARM_THM_JUMP6:
9019       /* Thumb B (branch) instruction.  */
9021 bfd_signed_vma relocation;
9022 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
9023 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
9024 bfd_signed_vma signed_check;
9026 /* CZB cannot jump backward. */
9027 if (r_type == R_ARM_THM_JUMP6)
9028 reloc_signed_min = 0;
9030 if (globals->use_rel)
9032 /* Need to refetch addend. */
9033 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9034 if (addend & ((howto->src_mask + 1) >> 1))
9037 signed_addend &= ~ howto->src_mask;
9038 signed_addend |= addend;
9041 signed_addend = addend;
9042 /* The value in the insn has been right shifted. We need to
9043 undo this, so that we can perform the address calculation
9044 in terms of bytes. */
9045 signed_addend <<= howto->rightshift;
9047 relocation = value + signed_addend;
9049 relocation -= (input_section->output_section->vma
9050 + input_section->output_offset
9053 relocation >>= howto->rightshift;
9054 signed_check = relocation;
9056 if (r_type == R_ARM_THM_JUMP6)
9057 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
9059 relocation &= howto->dst_mask;
9060 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
9062 bfd_put_16 (input_bfd, relocation, hit_data);
9064 /* Assumes two's complement. */
9065 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9066 return bfd_reloc_overflow;
9068 return bfd_reloc_ok;
9071 case R_ARM_ALU_PCREL7_0:
9072 case R_ARM_ALU_PCREL15_8:
9073 case R_ARM_ALU_PCREL23_15:
9078 insn = bfd_get_32 (input_bfd, hit_data);
9079 if (globals->use_rel)
9081 /* Extract the addend. */
9082 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
9083 signed_addend = addend;
9085 relocation = value + signed_addend;
9087 relocation -= (input_section->output_section->vma
9088 + input_section->output_offset
9090 insn = (insn & ~0xfff)
9091 | ((howto->bitpos << 7) & 0xf00)
9092 | ((relocation >> howto->bitpos) & 0xff);
9093 	bfd_put_32 (input_bfd, insn, hit_data);
9095 return bfd_reloc_ok;
9097 case R_ARM_GNU_VTINHERIT:
9098 case R_ARM_GNU_VTENTRY:
9099 return bfd_reloc_ok;
9101 case R_ARM_GOTOFF32:
9102 /* Relocation is relative to the start of the
9103 global offset table. */
9105 BFD_ASSERT (sgot != NULL);
9107 return bfd_reloc_notsupported;
9109 /* If we are addressing a Thumb function, we need to adjust the
9110 address by one, so that attempts to call the function pointer will
9111 correctly interpret it as Thumb code. */
9112 if (branch_type == ST_BRANCH_TO_THUMB)
9115 /* Note that sgot->output_offset is not involved in this
9116 calculation. We always want the start of .got. If we
9117 define _GLOBAL_OFFSET_TABLE in a different way, as is
9118 permitted by the ABI, we might have to change this
9120 value -= sgot->output_section->vma;
9121 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9122 contents, rel->r_offset, value,
9126 /* Use global offset table as symbol value. */
9127 BFD_ASSERT (sgot != NULL);
9130 return bfd_reloc_notsupported;
9132 *unresolved_reloc_p = FALSE;
9133 value = sgot->output_section->vma;
9134 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9135 contents, rel->r_offset, value,
9139 case R_ARM_GOT_PREL:
9140 /* Relocation is to the entry for this symbol in the
9141 global offset table. */
9143 return bfd_reloc_notsupported;
9145 if (dynreloc_st_type == STT_GNU_IFUNC
9146 && plt_offset != (bfd_vma) -1
9147 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
9149 /* We have a relocation against a locally-binding STT_GNU_IFUNC
9150 symbol, and the relocation resolves directly to the runtime
9151 target rather than to the .iplt entry. This means that any
9152 .got entry would be the same value as the .igot.plt entry,
9153 so there's no point creating both. */
9154 sgot = globals->root.igotplt;
9155 value = sgot->output_offset + gotplt_offset;
9161 off = h->got.offset;
9162 BFD_ASSERT (off != (bfd_vma) -1);
9165 	      /* We have already processed one GOT relocation against
9168 if (globals->root.dynamic_sections_created
9169 && !SYMBOL_REFERENCES_LOCAL (info, h))
9170 *unresolved_reloc_p = FALSE;
9174 Elf_Internal_Rela outrel;
9176 if (!SYMBOL_REFERENCES_LOCAL (info, h))
9178 /* If the symbol doesn't resolve locally in a static
9179 object, we have an undefined reference. If the
9180 symbol doesn't resolve locally in a dynamic object,
9181 it should be resolved by the dynamic linker. */
9182 if (globals->root.dynamic_sections_created)
9184 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
9185 *unresolved_reloc_p = FALSE;
9189 outrel.r_addend = 0;
9193 if (dynreloc_st_type == STT_GNU_IFUNC)
9194 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9195 else if (info->shared)
9196 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9199 outrel.r_addend = dynreloc_value;
9202 /* The GOT entry is initialized to zero by default.
9203 See if we should install a different value. */
9204 if (outrel.r_addend != 0
9205 && (outrel.r_info == 0 || globals->use_rel))
9207 bfd_put_32 (output_bfd, outrel.r_addend,
9208 sgot->contents + off);
9209 outrel.r_addend = 0;
9212 if (outrel.r_info != 0)
9214 outrel.r_offset = (sgot->output_section->vma
9215 + sgot->output_offset
9217 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9221 value = sgot->output_offset + off;
9227 BFD_ASSERT (local_got_offsets != NULL &&
9228 local_got_offsets[r_symndx] != (bfd_vma) -1);
9230 off = local_got_offsets[r_symndx];
9232 /* The offset must always be a multiple of 4. We use the
9233 least significant bit to record whether we have already
9234 generated the necessary reloc. */
9239 if (globals->use_rel)
9240 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
9242 if (info->shared || dynreloc_st_type == STT_GNU_IFUNC)
9244 Elf_Internal_Rela outrel;
9246 outrel.r_addend = addend + dynreloc_value;
9247 outrel.r_offset = (sgot->output_section->vma
9248 + sgot->output_offset
9250 if (dynreloc_st_type == STT_GNU_IFUNC)
9251 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9253 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9254 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9257 local_got_offsets[r_symndx] |= 1;
9260 value = sgot->output_offset + off;
9262 if (r_type != R_ARM_GOT32)
9263 value += sgot->output_section->vma;
9265 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9266 contents, rel->r_offset, value,
9269 case R_ARM_TLS_LDO32:
9270 value = value - dtpoff_base (info);
9272 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9273 contents, rel->r_offset, value,
9276 case R_ARM_TLS_LDM32:
9283 off = globals->tls_ldm_got.offset;
9289 /* If we don't know the module number, create a relocation
9293 Elf_Internal_Rela outrel;
9295 if (srelgot == NULL)
9298 outrel.r_addend = 0;
9299 outrel.r_offset = (sgot->output_section->vma
9300 + sgot->output_offset + off);
9301 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
9303 if (globals->use_rel)
9304 bfd_put_32 (output_bfd, outrel.r_addend,
9305 sgot->contents + off);
9307 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9310 bfd_put_32 (output_bfd, 1, sgot->contents + off);
9312 globals->tls_ldm_got.offset |= 1;
9315 value = sgot->output_section->vma + sgot->output_offset + off
9316 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
9318 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9319 contents, rel->r_offset, value,
9323 case R_ARM_TLS_CALL:
9324 case R_ARM_THM_TLS_CALL:
9325 case R_ARM_TLS_GD32:
9326 case R_ARM_TLS_IE32:
9327 case R_ARM_TLS_GOTDESC:
9328 case R_ARM_TLS_DESCSEQ:
9329 case R_ARM_THM_TLS_DESCSEQ:
9331 bfd_vma off, offplt;
9335 BFD_ASSERT (sgot != NULL);
9340 dyn = globals->root.dynamic_sections_created;
9341 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
9343 || !SYMBOL_REFERENCES_LOCAL (info, h)))
9345 *unresolved_reloc_p = FALSE;
9348 off = h->got.offset;
9349 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
9350 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
9354 BFD_ASSERT (local_got_offsets != NULL);
9355 off = local_got_offsets[r_symndx];
9356 offplt = local_tlsdesc_gotents[r_symndx];
9357 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
9360 	/* Linker relaxation happens from one of the
9361 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
9362 if (ELF32_R_TYPE(rel->r_info) != r_type)
9363 tls_type = GOT_TLS_IE;
9365 BFD_ASSERT (tls_type != GOT_UNKNOWN);
9371 bfd_boolean need_relocs = FALSE;
9372 Elf_Internal_Rela outrel;
9375 /* The GOT entries have not been initialized yet. Do it
9376 now, and emit any relocations. If both an IE GOT and a
9377 GD GOT are necessary, we emit the GD first. */
9379 if ((info->shared || indx != 0)
9381 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9382 || h->root.type != bfd_link_hash_undefweak))
9385 BFD_ASSERT (srelgot != NULL);
9388 if (tls_type & GOT_TLS_GDESC)
9392 /* We should have relaxed, unless this is an undefined
9394 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
9396 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
9397 <= globals->root.sgotplt->size);
9399 outrel.r_addend = 0;
9400 outrel.r_offset = (globals->root.sgotplt->output_section->vma
9401 + globals->root.sgotplt->output_offset
9403 + globals->sgotplt_jump_table_size);
9405 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
9406 sreloc = globals->root.srelplt;
9407 loc = sreloc->contents;
9408 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
9409 BFD_ASSERT (loc + RELOC_SIZE (globals)
9410 <= sreloc->contents + sreloc->size);
9412 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
9414 /* For globals, the first word in the relocation gets
9415 the relocation index and the top bit set, or zero,
9416 if we're binding now. For locals, it gets the
9417 symbol's offset in the tls section. */
9418 bfd_put_32 (output_bfd,
9419 !h ? value - elf_hash_table (info)->tls_sec->vma
9420 : info->flags & DF_BIND_NOW ? 0
9421 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
9422 globals->root.sgotplt->contents + offplt
9423 + globals->sgotplt_jump_table_size);
9425 /* Second word in the relocation is always zero. */
9426 bfd_put_32 (output_bfd, 0,
9427 globals->root.sgotplt->contents + offplt
9428 + globals->sgotplt_jump_table_size + 4);
9430 if (tls_type & GOT_TLS_GD)
9434 outrel.r_addend = 0;
9435 outrel.r_offset = (sgot->output_section->vma
9436 + sgot->output_offset
9438 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
9440 if (globals->use_rel)
9441 bfd_put_32 (output_bfd, outrel.r_addend,
9442 sgot->contents + cur_off);
9444 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9447 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9448 sgot->contents + cur_off + 4);
9451 outrel.r_addend = 0;
9452 outrel.r_info = ELF32_R_INFO (indx,
9453 R_ARM_TLS_DTPOFF32);
9454 outrel.r_offset += 4;
9456 if (globals->use_rel)
9457 bfd_put_32 (output_bfd, outrel.r_addend,
9458 sgot->contents + cur_off + 4);
9460 elf32_arm_add_dynreloc (output_bfd, info,
9466 /* If we are not emitting relocations for a
9467 general dynamic reference, then we must be in a
9468 static link or an executable link with the
9469 symbol binding locally. Mark it as belonging
9470 to module 1, the executable. */
9471 bfd_put_32 (output_bfd, 1,
9472 sgot->contents + cur_off);
9473 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9474 sgot->contents + cur_off + 4);
9480 if (tls_type & GOT_TLS_IE)
9485 outrel.r_addend = value - dtpoff_base (info);
9487 outrel.r_addend = 0;
9488 outrel.r_offset = (sgot->output_section->vma
9489 + sgot->output_offset
9491 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
9493 if (globals->use_rel)
9494 bfd_put_32 (output_bfd, outrel.r_addend,
9495 sgot->contents + cur_off);
9497 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9500 bfd_put_32 (output_bfd, tpoff (info, value),
9501 sgot->contents + cur_off);
9508 local_got_offsets[r_symndx] |= 1;
9511 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
9513 else if (tls_type & GOT_TLS_GDESC)
9516 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
9517 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
9519 bfd_signed_vma offset;
9520 /* TLS stubs are arm mode. The original symbol is a
9521 data object, so branch_type is bogus. */
9522 branch_type = ST_BRANCH_TO_ARM;
9523 enum elf32_arm_stub_type stub_type
9524 = arm_type_of_stub (info, input_section, rel,
9525 st_type, &branch_type,
9526 (struct elf32_arm_link_hash_entry *)h,
9527 globals->tls_trampoline, globals->root.splt,
9528 input_bfd, sym_name);
9530 if (stub_type != arm_stub_none)
9532 struct elf32_arm_stub_hash_entry *stub_entry
9533 = elf32_arm_get_stub_entry
9534 (input_section, globals->root.splt, 0, rel,
9535 globals, stub_type);
9536 offset = (stub_entry->stub_offset
9537 + stub_entry->stub_sec->output_offset
9538 + stub_entry->stub_sec->output_section->vma);
9541 offset = (globals->root.splt->output_section->vma
9542 + globals->root.splt->output_offset
9543 + globals->tls_trampoline);
9545 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
9549 offset -= (input_section->output_section->vma
9550 + input_section->output_offset
9551 + rel->r_offset + 8);
9555 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
9559 		/* Thumb blx encodes the offset in a complicated fashion.  */
9561 unsigned upper_insn, lower_insn;
9564 offset -= (input_section->output_section->vma
9565 + input_section->output_offset
9566 + rel->r_offset + 4);
9568 if (stub_type != arm_stub_none
9569 && arm_stub_is_thumb (stub_type))
9571 lower_insn = 0xd000;
9575 lower_insn = 0xc000;
9576 /* Round up the offset to a word boundary */
9577 offset = (offset + 2) & ~2;
9581 upper_insn = (0xf000
9582 | ((offset >> 12) & 0x3ff)
9584 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
9585 | (((!((offset >> 22) & 1)) ^ neg) << 11)
9586 | ((offset >> 1) & 0x7ff);
9587 bfd_put_16 (input_bfd, upper_insn, hit_data);
9588 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9589 return bfd_reloc_ok;
9592 	  /* These relocations need special care: besides the fact that
9593 	     they point somewhere in .gotplt, the addend must be
9594 	     adjusted according to the type of instruction
9596 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
9598 unsigned long data, insn;
9601 data = bfd_get_32 (input_bfd, hit_data);
9607 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
9608 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
9610 | bfd_get_16 (input_bfd,
9611 contents + rel->r_offset - data + 2);
9612 if ((insn & 0xf800c000) == 0xf000c000)
9615 else if ((insn & 0xffffff00) == 0x4400)
9620 (*_bfd_error_handler)
9621 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
9622 input_bfd, input_section,
9623 (unsigned long)rel->r_offset, insn);
9624 return bfd_reloc_notsupported;
9629 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
9634 case 0xfa: /* blx */
9638 case 0xe0: /* add */
9643 (*_bfd_error_handler)
9644 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
9645 input_bfd, input_section,
9646 (unsigned long)rel->r_offset, insn);
9647 return bfd_reloc_notsupported;
9651 value += ((globals->root.sgotplt->output_section->vma
9652 + globals->root.sgotplt->output_offset + off)
9653 - (input_section->output_section->vma
9654 + input_section->output_offset
9656 + globals->sgotplt_jump_table_size);
9659 value = ((globals->root.sgot->output_section->vma
9660 + globals->root.sgot->output_offset + off)
9661 - (input_section->output_section->vma
9662 + input_section->output_offset + rel->r_offset));
9664 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9665 contents, rel->r_offset, value,
9669 case R_ARM_TLS_LE32:
9670 if (info->shared && !info->pie)
9672 (*_bfd_error_handler)
9673 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
9674 input_bfd, input_section,
9675 (long) rel->r_offset, howto->name);
9676 return bfd_reloc_notsupported;
9679 value = tpoff (info, value);
9681 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9682 contents, rel->r_offset, value,
9686 if (globals->fix_v4bx)
9688 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9690 /* Ensure that we have a BX instruction. */
9691 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
9693 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
9695 /* Branch to veneer. */
9697 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
9698 glue_addr -= input_section->output_section->vma
9699 + input_section->output_offset
9700 + rel->r_offset + 8;
9701 insn = (insn & 0xf0000000) | 0x0a000000
9702 | ((glue_addr >> 2) & 0x00ffffff);
9706 /* Preserve Rm (lowest four bits) and the condition code
9707 (highest four bits). Other bits encode MOV PC,Rm. */
9708 insn = (insn & 0xf000000f) | 0x01a0f000;
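	    /* For example, BX r3 (0xe12fff13) becomes MOV pc, r3
	       (0xe1a0f003), with the condition field preserved.  */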
9711 bfd_put_32 (input_bfd, insn, hit_data);
9713 return bfd_reloc_ok;
9715 case R_ARM_MOVW_ABS_NC:
9716 case R_ARM_MOVT_ABS:
9717 case R_ARM_MOVW_PREL_NC:
9718 case R_ARM_MOVT_PREL:
9719       /* Until we properly support segment-base-relative addressing,
9720 we assume the segment base to be zero, as for the group relocations.
9721 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
9722 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
9723 case R_ARM_MOVW_BREL_NC:
9724 case R_ARM_MOVW_BREL:
9725 case R_ARM_MOVT_BREL:
9727 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9729 if (globals->use_rel)
9731 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9732 signed_addend = (addend ^ 0x8000) - 0x8000;
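	  /* The 16-bit immediate is split as imm4:imm12 (bits 19-16 and
	     11-0); the XOR/subtract pair sign-extends the reassembled value
	     from 16 bits.  */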
9735 value += signed_addend;
9737 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
9738 value -= (input_section->output_section->vma
9739 + input_section->output_offset + rel->r_offset);
9741 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
9742 return bfd_reloc_overflow;
9744 if (branch_type == ST_BRANCH_TO_THUMB)
9747 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
9748 || r_type == R_ARM_MOVT_BREL)
9752 insn |= value & 0xfff;
9753 insn |= (value & 0xf000) << 4;
9754 bfd_put_32 (input_bfd, insn, hit_data);
9756 return bfd_reloc_ok;
9758 case R_ARM_THM_MOVW_ABS_NC:
9759 case R_ARM_THM_MOVT_ABS:
9760 case R_ARM_THM_MOVW_PREL_NC:
9761 case R_ARM_THM_MOVT_PREL:
9762       /* Until we properly support segment-base-relative addressing,
9763 we assume the segment base to be zero, as for the above relocations.
9764 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
9765 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
9766 as R_ARM_THM_MOVT_ABS. */
9767 case R_ARM_THM_MOVW_BREL_NC:
9768 case R_ARM_THM_MOVW_BREL:
9769 case R_ARM_THM_MOVT_BREL:
9773 insn = bfd_get_16 (input_bfd, hit_data) << 16;
9774 insn |= bfd_get_16 (input_bfd, hit_data + 2);
9776 if (globals->use_rel)
9778 addend = ((insn >> 4) & 0xf000)
9779 | ((insn >> 15) & 0x0800)
9780 | ((insn >> 4) & 0x0700)
9782 signed_addend = (addend ^ 0x8000) - 0x8000;
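	  /* Here the 16-bit immediate is reassembled from imm4:i:imm3:imm8
	     (bits 19-16, 26, 14-12 and 7-0 of the combined word) and then
	     sign-extended as above.  */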
9785 value += signed_addend;
9787 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
9788 value -= (input_section->output_section->vma
9789 + input_section->output_offset + rel->r_offset);
9791 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
9792 return bfd_reloc_overflow;
9794 if (branch_type == ST_BRANCH_TO_THUMB)
9797 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
9798 || r_type == R_ARM_THM_MOVT_BREL)
9802 insn |= (value & 0xf000) << 4;
9803 insn |= (value & 0x0800) << 15;
9804 insn |= (value & 0x0700) << 4;
9805 insn |= (value & 0x00ff);
9807 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9808 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9810 return bfd_reloc_ok;
9812 case R_ARM_ALU_PC_G0_NC:
9813 case R_ARM_ALU_PC_G1_NC:
9814 case R_ARM_ALU_PC_G0:
9815 case R_ARM_ALU_PC_G1:
9816 case R_ARM_ALU_PC_G2:
9817 case R_ARM_ALU_SB_G0_NC:
9818 case R_ARM_ALU_SB_G1_NC:
9819 case R_ARM_ALU_SB_G0:
9820 case R_ARM_ALU_SB_G1:
9821 case R_ARM_ALU_SB_G2:
9823 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9824 bfd_vma pc = input_section->output_section->vma
9825 + input_section->output_offset + rel->r_offset;
9826 /* sb should be the origin of the *segment* containing the symbol.
9827 It is not clear how to obtain this OS-dependent value, so we
9828 make an arbitrary choice of zero. */
9832 bfd_signed_vma signed_value;
9835 /* Determine which group of bits to select. */
9838 case R_ARM_ALU_PC_G0_NC:
9839 case R_ARM_ALU_PC_G0:
9840 case R_ARM_ALU_SB_G0_NC:
9841 case R_ARM_ALU_SB_G0:
9845 case R_ARM_ALU_PC_G1_NC:
9846 case R_ARM_ALU_PC_G1:
9847 case R_ARM_ALU_SB_G1_NC:
9848 case R_ARM_ALU_SB_G1:
9852 case R_ARM_ALU_PC_G2:
9853 case R_ARM_ALU_SB_G2:
9861 /* If REL, extract the addend from the insn. If RELA, it will
9862 have already been fetched for us. */
9863 if (globals->use_rel)
9866 bfd_vma constant = insn & 0xff;
9867 bfd_vma rotation = (insn & 0xf00) >> 8;
9870 signed_addend = constant;
9873 /* Compensate for the fact that in the instruction, the
9874 rotation is stored in multiples of 2 bits. */
9877 /* Rotate "constant" right by "rotation" bits. */
9878 signed_addend = (constant >> rotation) |
9879 (constant << (8 * sizeof (bfd_vma) - rotation));
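	    /* This reproduces the ARM rotated-immediate form: an 8-bit
	       constant rotated right by twice the 4-bit rotation field.  */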
9882 /* Determine if the instruction is an ADD or a SUB.
9883 (For REL, this determines the sign of the addend.) */
9884 negative = identify_add_or_sub (insn);
9887 (*_bfd_error_handler)
9888 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
9889 input_bfd, input_section,
9890 (long) rel->r_offset, howto->name);
9891 return bfd_reloc_overflow;
9894 signed_addend *= negative;
9897 /* Compute the value (X) to go in the place. */
9898 if (r_type == R_ARM_ALU_PC_G0_NC
9899 || r_type == R_ARM_ALU_PC_G1_NC
9900 || r_type == R_ARM_ALU_PC_G0
9901 || r_type == R_ARM_ALU_PC_G1
9902 || r_type == R_ARM_ALU_PC_G2)
9904 signed_value = value - pc + signed_addend;
9906 /* Section base relative. */
9907 signed_value = value - sb + signed_addend;
9909 /* If the target symbol is a Thumb function, then set the
9910 Thumb bit in the address. */
9911 if (branch_type == ST_BRANCH_TO_THUMB)
9914 /* Calculate the value of the relevant G_n, in encoded
9915 constant-with-rotation format. */
9916 g_n = calculate_group_reloc_mask (abs (signed_value), group,
9919 /* Check for overflow if required. */
9920 if ((r_type == R_ARM_ALU_PC_G0
9921 || r_type == R_ARM_ALU_PC_G1
9922 || r_type == R_ARM_ALU_PC_G2
9923 || r_type == R_ARM_ALU_SB_G0
9924 || r_type == R_ARM_ALU_SB_G1
9925 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
9927 (*_bfd_error_handler)
9928 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
9929 input_bfd, input_section,
9930 (long) rel->r_offset, abs (signed_value), howto->name);
9931 return bfd_reloc_overflow;
9934 /* Mask out the value and the ADD/SUB part of the opcode; take care
9935 not to destroy the S bit. */
9938 /* Set the opcode according to whether the value to go in the
9939 place is negative. */
9940 if (signed_value < 0)
9945 /* Encode the offset. */
9948 bfd_put_32 (input_bfd, insn, hit_data);
9950 return bfd_reloc_ok;
9952 case R_ARM_LDR_PC_G0:
9953 case R_ARM_LDR_PC_G1:
9954 case R_ARM_LDR_PC_G2:
9955 case R_ARM_LDR_SB_G0:
9956 case R_ARM_LDR_SB_G1:
9957 case R_ARM_LDR_SB_G2:
9959 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9960 bfd_vma pc = input_section->output_section->vma
9961 + input_section->output_offset + rel->r_offset;
9962 bfd_vma sb = 0; /* See note above. */
9964 bfd_signed_vma signed_value;
9967 /* Determine which groups of bits to calculate. */
9970 case R_ARM_LDR_PC_G0:
9971 case R_ARM_LDR_SB_G0:
9975 case R_ARM_LDR_PC_G1:
9976 case R_ARM_LDR_SB_G1:
9980 case R_ARM_LDR_PC_G2:
9981 case R_ARM_LDR_SB_G2:
9989 /* If REL, extract the addend from the insn. If RELA, it will
9990 have already been fetched for us. */
9991 if (globals->use_rel)
9993 int negative = (insn & (1 << 23)) ? 1 : -1;
9994 signed_addend = negative * (insn & 0xfff);
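	  /* For LDR/STR (immediate) the offset is an unsigned 12-bit value in
	     bits 11-0, with bit 23 (the U bit) giving its sign.  */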
9997 /* Compute the value (X) to go in the place. */
9998 if (r_type == R_ARM_LDR_PC_G0
9999 || r_type == R_ARM_LDR_PC_G1
10000 || r_type == R_ARM_LDR_PC_G2)
10002 signed_value = value - pc + signed_addend;
10004 /* Section base relative. */
10005 signed_value = value - sb + signed_addend;
10007 /* Calculate the value of the relevant G_{n-1} to obtain
10008 the residual at that stage. */
10009 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10011 /* Check for overflow. */
10012 if (residual >= 0x1000)
10014 (*_bfd_error_handler)
10015 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10016 input_bfd, input_section,
10017 (long) rel->r_offset, abs (signed_value), howto->name);
10018 return bfd_reloc_overflow;
10021 /* Mask out the value and U bit. */
10022 insn &= 0xff7ff000;
10024 /* Set the U bit if the value to go in the place is non-negative. */
10025 if (signed_value >= 0)
10028 /* Encode the offset. */
10031 bfd_put_32 (input_bfd, insn, hit_data);
10033 return bfd_reloc_ok;
10035 case R_ARM_LDRS_PC_G0:
10036 case R_ARM_LDRS_PC_G1:
10037 case R_ARM_LDRS_PC_G2:
10038 case R_ARM_LDRS_SB_G0:
10039 case R_ARM_LDRS_SB_G1:
10040 case R_ARM_LDRS_SB_G2:
10042 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10043 bfd_vma pc = input_section->output_section->vma
10044 + input_section->output_offset + rel->r_offset;
10045 bfd_vma sb = 0; /* See note above. */
10047 bfd_signed_vma signed_value;
10050 /* Determine which groups of bits to calculate. */
10053 case R_ARM_LDRS_PC_G0:
10054 case R_ARM_LDRS_SB_G0:
10058 case R_ARM_LDRS_PC_G1:
10059 case R_ARM_LDRS_SB_G1:
10063 case R_ARM_LDRS_PC_G2:
10064 case R_ARM_LDRS_SB_G2:
10072 /* If REL, extract the addend from the insn. If RELA, it will
10073 have already been fetched for us. */
10074 if (globals->use_rel)
10076 int negative = (insn & (1 << 23)) ? 1 : -1;
10077 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
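	  /* The halfword and doubleword forms (LDRH/LDRSH/LDRSB/LDRD) split
	     an 8-bit offset into imm4H (bits 11-8) and imm4L (bits 3-0),
	     again with bit 23 as the U bit.  */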
10080 /* Compute the value (X) to go in the place. */
10081 if (r_type == R_ARM_LDRS_PC_G0
10082 || r_type == R_ARM_LDRS_PC_G1
10083 || r_type == R_ARM_LDRS_PC_G2)
10085 signed_value = value - pc + signed_addend;
10087 /* Section base relative. */
10088 signed_value = value - sb + signed_addend;
10090 /* Calculate the value of the relevant G_{n-1} to obtain
10091 the residual at that stage. */
10092 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10094 /* Check for overflow. */
10095 if (residual >= 0x100)
10097 (*_bfd_error_handler)
10098 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10099 input_bfd, input_section,
10100 (long) rel->r_offset, abs (signed_value), howto->name);
10101 return bfd_reloc_overflow;
10104 /* Mask out the value and U bit. */
10105 insn &= 0xff7ff0f0;
10107 /* Set the U bit if the value to go in the place is non-negative. */
10108 if (signed_value >= 0)
10111 /* Encode the offset. */
10112 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
10114 bfd_put_32 (input_bfd, insn, hit_data);
10116 return bfd_reloc_ok;
10118 case R_ARM_LDC_PC_G0:
10119 case R_ARM_LDC_PC_G1:
10120 case R_ARM_LDC_PC_G2:
10121 case R_ARM_LDC_SB_G0:
10122 case R_ARM_LDC_SB_G1:
10123 case R_ARM_LDC_SB_G2:
10125 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10126 bfd_vma pc = input_section->output_section->vma
10127 + input_section->output_offset + rel->r_offset;
10128 bfd_vma sb = 0; /* See note above. */
10130 bfd_signed_vma signed_value;
10133 /* Determine which groups of bits to calculate. */
10136 case R_ARM_LDC_PC_G0:
10137 case R_ARM_LDC_SB_G0:
10141 case R_ARM_LDC_PC_G1:
10142 case R_ARM_LDC_SB_G1:
10146 case R_ARM_LDC_PC_G2:
10147 case R_ARM_LDC_SB_G2:
10155 /* If REL, extract the addend from the insn. If RELA, it will
10156 have already been fetched for us. */
10157 if (globals->use_rel)
10159 int negative = (insn & (1 << 23)) ? 1 : -1;
10160 signed_addend = negative * ((insn & 0xff) << 2);
10163 /* Compute the value (X) to go in the place. */
10164 if (r_type == R_ARM_LDC_PC_G0
10165 || r_type == R_ARM_LDC_PC_G1
10166 || r_type == R_ARM_LDC_PC_G2)
10168 signed_value = value - pc + signed_addend;
10170 /* Section base relative. */
10171 signed_value = value - sb + signed_addend;
10173 /* Calculate the value of the relevant G_{n-1} to obtain
10174 the residual at that stage. */
10175 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10177 /* Check for overflow. (The absolute value to go in the place must be
10178 divisible by four and, after having been divided by four, must
10179 fit in eight bits.) */
10180 if ((residual & 0x3) != 0 || residual >= 0x400)
10182 (*_bfd_error_handler)
10183 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10184 input_bfd, input_section,
10185 (long) rel->r_offset, abs (signed_value), howto->name);
10186 return bfd_reloc_overflow;
10189 /* Mask out the value and U bit. */
10190 insn &= 0xff7fff00;
10192 /* Set the U bit if the value to go in the place is non-negative. */
10193 if (signed_value >= 0)
10196 /* Encode the offset. */
10197 insn |= residual >> 2;
10199 bfd_put_32 (input_bfd, insn, hit_data);
10201 return bfd_reloc_ok;
10204 return bfd_reloc_notsupported;
10208 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
10210 arm_add_to_rel (bfd * abfd,
10211 bfd_byte * address,
10212 reloc_howto_type * howto,
10213 bfd_signed_vma increment)
10215 bfd_signed_vma addend;
10217 if (howto->type == R_ARM_THM_CALL
10218 || howto->type == R_ARM_THM_JUMP24)
10220 int upper_insn, lower_insn;
10223 upper_insn = bfd_get_16 (abfd, address);
10224 lower_insn = bfd_get_16 (abfd, address + 2);
10225 upper = upper_insn & 0x7ff;
10226 lower = lower_insn & 0x7ff;
10228 addend = (upper << 12) | (lower << 1);
10229 addend += increment;
10232 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
10233 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
10235 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
10236 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
10242 contents = bfd_get_32 (abfd, address);
10244 /* Get the (signed) value from the instruction. */
10245 addend = contents & howto->src_mask;
10246 if (addend & ((howto->src_mask + 1) >> 1))
10248 bfd_signed_vma mask;
10251 mask &= ~ howto->src_mask;
10255       /* Add in the increment (which is a byte value).  */
10256 switch (howto->type)
10259 addend += increment;
10266 addend <<= howto->size;
10267 addend += increment;
10269 /* Should we check for overflow here ? */
10271 /* Drop any undesired bits. */
10272 addend >>= howto->rightshift;
10276 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
10278 bfd_put_32 (abfd, contents, address);
10282 #define IS_ARM_TLS_RELOC(R_TYPE) \
10283 ((R_TYPE) == R_ARM_TLS_GD32 \
10284 || (R_TYPE) == R_ARM_TLS_LDO32 \
10285 || (R_TYPE) == R_ARM_TLS_LDM32 \
10286 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
10287 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
10288 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
10289 || (R_TYPE) == R_ARM_TLS_LE32 \
10290 || (R_TYPE) == R_ARM_TLS_IE32 \
10291 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
10293 /* Specific set of relocations for the gnu tls dialect. */
10294 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
10295 ((R_TYPE) == R_ARM_TLS_GOTDESC \
10296 || (R_TYPE) == R_ARM_TLS_CALL \
10297 || (R_TYPE) == R_ARM_THM_TLS_CALL \
10298 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
10299 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
10301 /* Relocate an ARM ELF section. */
10304 elf32_arm_relocate_section (bfd * output_bfd,
10305 struct bfd_link_info * info,
10307 asection * input_section,
10308 bfd_byte * contents,
10309 Elf_Internal_Rela * relocs,
10310 Elf_Internal_Sym * local_syms,
10311 asection ** local_sections)
10313 Elf_Internal_Shdr *symtab_hdr;
10314 struct elf_link_hash_entry **sym_hashes;
10315 Elf_Internal_Rela *rel;
10316 Elf_Internal_Rela *relend;
10318 struct elf32_arm_link_hash_table * globals;
10320 globals = elf32_arm_hash_table (info);
10321 if (globals == NULL)
10324 symtab_hdr = & elf_symtab_hdr (input_bfd);
10325 sym_hashes = elf_sym_hashes (input_bfd);
10328 relend = relocs + input_section->reloc_count;
10329 for (; rel < relend; rel++)
10332 reloc_howto_type * howto;
10333 unsigned long r_symndx;
10334 Elf_Internal_Sym * sym;
10336 struct elf_link_hash_entry * h;
10337 bfd_vma relocation;
10338 bfd_reloc_status_type r;
10341 bfd_boolean unresolved_reloc = FALSE;
10342 char *error_message = NULL;
10344 r_symndx = ELF32_R_SYM (rel->r_info);
10345 r_type = ELF32_R_TYPE (rel->r_info);
10346 r_type = arm_real_reloc_type (globals, r_type);
10348 if ( r_type == R_ARM_GNU_VTENTRY
10349 || r_type == R_ARM_GNU_VTINHERIT)
10352 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
10353 howto = bfd_reloc.howto;
10359 if (r_symndx < symtab_hdr->sh_info)
10361 sym = local_syms + r_symndx;
10362 sym_type = ELF32_ST_TYPE (sym->st_info);
10363 sec = local_sections[r_symndx];
10365 /* An object file might have a reference to a local
10366 undefined symbol. This is a daft object file, but we
10367 should at least do something about it. V4BX & NONE
10368 relocations do not use the symbol and are explicitly
10369 allowed to use the undefined symbol, so allow those.
10370 Likewise for relocations against STN_UNDEF. */
10371 if (r_type != R_ARM_V4BX
10372 && r_type != R_ARM_NONE
10373 && r_symndx != STN_UNDEF
10374 && bfd_is_und_section (sec)
10375 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
10377 if (!info->callbacks->undefined_symbol
10378 (info, bfd_elf_string_from_elf_section
10379 (input_bfd, symtab_hdr->sh_link, sym->st_name),
10380 input_bfd, input_section,
10381 rel->r_offset, TRUE))
10385 if (globals->use_rel)
10387 relocation = (sec->output_section->vma
10388 + sec->output_offset
10390 if (!info->relocatable
10391 && (sec->flags & SEC_MERGE)
10392 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10395 bfd_vma addend, value;
10399 case R_ARM_MOVW_ABS_NC:
10400 case R_ARM_MOVT_ABS:
10401 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10402 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
10403 addend = (addend ^ 0x8000) - 0x8000;
10406 case R_ARM_THM_MOVW_ABS_NC:
10407 case R_ARM_THM_MOVT_ABS:
10408 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
10410 value |= bfd_get_16 (input_bfd,
10411 contents + rel->r_offset + 2);
10412 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
10413 | ((value & 0x04000000) >> 15);
10414 addend = (addend ^ 0x8000) - 0x8000;
10418 if (howto->rightshift
10419 || (howto->src_mask & (howto->src_mask + 1)))
10421 (*_bfd_error_handler)
10422 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
10423 input_bfd, input_section,
10424 (long) rel->r_offset, howto->name);
10428 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10430 /* Get the (signed) value from the instruction. */
10431 addend = value & howto->src_mask;
10432 if (addend & ((howto->src_mask + 1) >> 1))
10434 bfd_signed_vma mask;
10437 mask &= ~ howto->src_mask;
10445 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
10447 addend += msec->output_section->vma + msec->output_offset;
10449 /* Cases here must match those in the preceding
10450 switch statement. */
10453 case R_ARM_MOVW_ABS_NC:
10454 case R_ARM_MOVT_ABS:
10455 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
10456 | (addend & 0xfff);
10457 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10460 case R_ARM_THM_MOVW_ABS_NC:
10461 case R_ARM_THM_MOVT_ABS:
10462 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
10463 | (addend & 0xff) | ((addend & 0x0800) << 15);
10464 bfd_put_16 (input_bfd, value >> 16,
10465 contents + rel->r_offset);
10466 bfd_put_16 (input_bfd, value,
10467 contents + rel->r_offset + 2);
10471 value = (value & ~ howto->dst_mask)
10472 | (addend & howto->dst_mask);
10473 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10479 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
10483 bfd_boolean warned;
10485 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
10486 r_symndx, symtab_hdr, sym_hashes,
10487 h, sec, relocation,
10488 unresolved_reloc, warned);
10490 sym_type = h->type;
10493 if (sec != NULL && discarded_section (sec))
10494 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
10495 rel, 1, relend, howto, 0, contents);
10497 if (info->relocatable)
10499 /* This is a relocatable link. We don't have to change
10500 anything, unless the reloc is against a section symbol,
10501 in which case we have to adjust according to where the
10502 section symbol winds up in the output section. */
10503 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10505 if (globals->use_rel)
10506 arm_add_to_rel (input_bfd, contents + rel->r_offset,
10507 howto, (bfd_signed_vma) sec->output_offset);
10509 rel->r_addend += sec->output_offset;
10515 name = h->root.root.string;
10518 name = (bfd_elf_string_from_elf_section
10519 (input_bfd, symtab_hdr->sh_link, sym->st_name));
10520 if (name == NULL || *name == '\0')
10521 name = bfd_section_name (input_bfd, sec);
10524 if (r_symndx != STN_UNDEF
10525 && r_type != R_ARM_NONE
10527 || h->root.type == bfd_link_hash_defined
10528 || h->root.type == bfd_link_hash_defweak)
10529 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
10531 (*_bfd_error_handler)
10532 ((sym_type == STT_TLS
10533 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
10534 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
10537 (long) rel->r_offset,
10542 /* We call elf32_arm_final_link_relocate unless we're completely
10543 done, i.e., the relaxation produced the final output we want,
10544 and we won't let anybody mess with it. Also, we have to do
10545 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
10546 both in relaxed and non-relaxed cases */
10547 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
10548 || (IS_ARM_TLS_GNU_RELOC (r_type)
10549 && !((h ? elf32_arm_hash_entry (h)->tls_type :
10550 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
10553 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
10554 contents, rel, h == NULL);
10555 /* This may have been marked unresolved because it came from
10556 a shared library. But we've just dealt with that. */
10557 unresolved_reloc = 0;
10560 r = bfd_reloc_continue;
10562 if (r == bfd_reloc_continue)
10563 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
10564 input_section, contents, rel,
10565 relocation, info, sec, name, sym_type,
10566 (h ? h->target_internal
10567 : ARM_SYM_BRANCH_TYPE (sym)), h,
10568 &unresolved_reloc, &error_message);
10570 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
10571 because such sections are not SEC_ALLOC and thus ld.so will
10572 not process them. */
10573 if (unresolved_reloc
10574 && !((input_section->flags & SEC_DEBUGGING) != 0
10576 && _bfd_elf_section_offset (output_bfd, info, input_section,
10577 rel->r_offset) != (bfd_vma) -1)
10579 (*_bfd_error_handler)
10580 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
10583 (long) rel->r_offset,
10585 h->root.root.string);
10589 if (r != bfd_reloc_ok)
10593 case bfd_reloc_overflow:
10594 /* If the overflowing reloc was to an undefined symbol,
10595 we have already printed one error message and there
10596 is no point complaining again. */
10598 h->root.type != bfd_link_hash_undefined)
10599 && (!((*info->callbacks->reloc_overflow)
10600 (info, (h ? &h->root : NULL), name, howto->name,
10601 (bfd_vma) 0, input_bfd, input_section,
10606 case bfd_reloc_undefined:
10607 if (!((*info->callbacks->undefined_symbol)
10608 (info, name, input_bfd, input_section,
10609 rel->r_offset, TRUE)))
10613 case bfd_reloc_outofrange:
10614 error_message = _("out of range");
10617 case bfd_reloc_notsupported:
10618 error_message = _("unsupported relocation");
10621 case bfd_reloc_dangerous:
10622 /* error_message should already be set. */
10626 error_message = _("unknown error");
10627 /* Fall through. */
10630 BFD_ASSERT (error_message != NULL);
10631 if (!((*info->callbacks->reloc_dangerous)
10632 (info, error_message, input_bfd, input_section,
10643 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
10644 adds the edit to the start of the list. (The list must be built in order of
10645 ascending TINDEX: the function's callers are primarily responsible for
10646 maintaining that condition). */
10649 add_unwind_table_edit (arm_unwind_table_edit **head,
10650 arm_unwind_table_edit **tail,
10651 arm_unwind_edit_type type,
10652 asection *linked_section,
10653 unsigned int tindex)
10655 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
10656 xmalloc (sizeof (arm_unwind_table_edit));
10658 new_edit->type = type;
10659 new_edit->linked_section = linked_section;
10660 new_edit->index = tindex;
10664 new_edit->next = NULL;
10667 (*tail)->next = new_edit;
10669 (*tail) = new_edit;
10672 (*head) = new_edit;
10676 new_edit->next = *head;
10685 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
10687 /* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
10689 adjust_exidx_size(asection *exidx_sec, int adjust)
10693 if (!exidx_sec->rawsize)
10694 exidx_sec->rawsize = exidx_sec->size;
10696 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
10697 out_sec = exidx_sec->output_section;
10698 /* Adjust size of output section. */
10699   bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
10702 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
10704 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
10706 struct _arm_elf_section_data *exidx_arm_data;
10708 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10709 add_unwind_table_edit (
10710 &exidx_arm_data->u.exidx.unwind_edit_list,
10711 &exidx_arm_data->u.exidx.unwind_edit_tail,
10712 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
10714 adjust_exidx_size(exidx_sec, 8);
10717 /* Scan .ARM.exidx tables, and create a list describing edits which should be
10718 made to those tables, such that:
10720 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
10721 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
10722 codes which have been inlined into the index).
10724 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
10726 The edits are applied when the tables are written
10727 (in elf32_arm_write_section). */
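/* Each .ARM.exidx entry is two words: a prel31 offset to the start of the
   region it covers, followed by either EXIDX_CANTUNWIND (1), an inlined
   unwind-opcode word (bit 31 set), or a prel31 offset to an .ARM.extab
   entry.  */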
10730 elf32_arm_fix_exidx_coverage (asection **text_section_order,
10731 unsigned int num_text_sections,
10732 struct bfd_link_info *info,
10733 bfd_boolean merge_exidx_entries)
10736 unsigned int last_second_word = 0, i;
10737 asection *last_exidx_sec = NULL;
10738 asection *last_text_sec = NULL;
10739 int last_unwind_type = -1;
10741   /* Walk over all EXIDX sections, and create backlinks from the corresponding
10743 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
10747 for (sec = inp->sections; sec != NULL; sec = sec->next)
10749 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
10750 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
10752 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
10755 if (elf_sec->linked_to)
10757 Elf_Internal_Shdr *linked_hdr
10758 = &elf_section_data (elf_sec->linked_to)->this_hdr;
10759 struct _arm_elf_section_data *linked_sec_arm_data
10760 = get_arm_elf_section_data (linked_hdr->bfd_section);
10762 if (linked_sec_arm_data == NULL)
10765 /* Link this .ARM.exidx section back from the text section it
10767 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
10772   /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
10773 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
10774 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
10776 for (i = 0; i < num_text_sections; i++)
10778 asection *sec = text_section_order[i];
10779 asection *exidx_sec;
10780 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
10781 struct _arm_elf_section_data *exidx_arm_data;
10782 bfd_byte *contents = NULL;
10783 int deleted_exidx_bytes = 0;
10785 arm_unwind_table_edit *unwind_edit_head = NULL;
10786 arm_unwind_table_edit *unwind_edit_tail = NULL;
10787 Elf_Internal_Shdr *hdr;
10790 if (arm_data == NULL)
10793 exidx_sec = arm_data->u.text.arm_exidx_sec;
10794 if (exidx_sec == NULL)
10796 /* Section has no unwind data. */
10797 if (last_unwind_type == 0 || !last_exidx_sec)
10800 /* Ignore zero sized sections. */
10801 if (sec->size == 0)
10804 insert_cantunwind_after(last_text_sec, last_exidx_sec);
10805 last_unwind_type = 0;
10809 /* Skip /DISCARD/ sections. */
10810 if (bfd_is_abs_section (exidx_sec->output_section))
10813 hdr = &elf_section_data (exidx_sec)->this_hdr;
10814 if (hdr->sh_type != SHT_ARM_EXIDX)
10817 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10818 if (exidx_arm_data == NULL)
10821 ibfd = exidx_sec->owner;
10823 if (hdr->contents != NULL)
10824 contents = hdr->contents;
10825 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
10829 for (j = 0; j < hdr->sh_size; j += 8)
10831 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
10835 /* An EXIDX_CANTUNWIND entry. */
10836 if (second_word == 1)
10838 if (last_unwind_type == 0)
10842 /* Inlined unwinding data. Merge if equal to previous. */
10843 else if ((second_word & 0x80000000) != 0)
10845 if (merge_exidx_entries
10846 && last_second_word == second_word && last_unwind_type == 1)
10849 last_second_word = second_word;
10851 /* Normal table entry. In theory we could merge these too,
10852 but duplicate entries are likely to be much less common. */
10858 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
10859 DELETE_EXIDX_ENTRY, NULL, j / 8);
10861 deleted_exidx_bytes += 8;
10864 last_unwind_type = unwind_type;
10867 /* Free contents if we allocated it ourselves. */
10868 if (contents != hdr->contents)
10871 /* Record edits to be applied later (in elf32_arm_write_section). */
10872 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
10873 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
10875 if (deleted_exidx_bytes > 0)
10876 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
10878 last_exidx_sec = exidx_sec;
10879 last_text_sec = sec;
10882 /* Add terminating CANTUNWIND entry. */
10883 if (last_exidx_sec && last_unwind_type != 0)
10884 insert_cantunwind_after (last_text_sec, last_exidx_sec);
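/* Copy the linker-created glue section NAME from IBFD into its output
   section in OBFD, giving elf32_arm_write_section a chance to process
   the contents first.  Sections marked SEC_EXCLUDE are skipped.  */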
10890 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
10891 bfd *ibfd, const char *name)
10893 asection *sec, *osec;
10895 sec = bfd_get_linker_section (ibfd, name);
10896 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
10899 osec = sec->output_section;
10900 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
10903 if (! bfd_set_section_contents (obfd, osec, sec->contents,
10904 sec->output_offset, sec->size))
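/* ARM-specific final link routine: run the generic ELF final link, then
   write out the stub sections and any interworking/veneer glue sections,
   passing each through elf32_arm_write_section on the way.  */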
10911 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
10913 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
10914 asection *sec, *osec;
10916 if (globals == NULL)
10919 /* Invoke the regular ELF backend linker to do all the work. */
10920 if (!bfd_elf_final_link (abfd, info))
10923 /* Process stub sections (eg BE8 encoding, ...). */
10924 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
10926 for (i = 0; i < htab->top_id; i++)
10928 sec = htab->stub_group[i].stub_sec;
10929 /* Only process it once, in its link_sec slot. */
10930 if (sec && i == htab->stub_group[i].link_sec->id)
10932 osec = sec->output_section;
10933 elf32_arm_write_section (abfd, info, sec, sec->contents);
10934 if (! bfd_set_section_contents (abfd, osec, sec->contents,
10935 sec->output_offset, sec->size))
10940 /* Write out any glue sections now that we have created all the stubs. */
10942 if (globals->bfd_of_glue_owner != NULL)
10944 if (! elf32_arm_output_glue_section (info, abfd,
10945 globals->bfd_of_glue_owner,
10946 ARM2THUMB_GLUE_SECTION_NAME))
10949 if (! elf32_arm_output_glue_section (info, abfd,
10950 globals->bfd_of_glue_owner,
10951 THUMB2ARM_GLUE_SECTION_NAME))
10954 if (! elf32_arm_output_glue_section (info, abfd,
10955 globals->bfd_of_glue_owner,
10956 VFP11_ERRATUM_VENEER_SECTION_NAME))
10959 if (! elf32_arm_output_glue_section (info, abfd,
10960 globals->bfd_of_glue_owner,
10961 ARM_BX_GLUE_SECTION_NAME))
10968 /* Return a best guess for the machine number based on the attributes. */
10970 static unsigned int
10971 bfd_arm_get_mach_from_attributes (bfd * abfd)
10973 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
10977 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
10978 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
10979 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
10981 case TAG_CPU_ARCH_V5TE:
10985 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
10986 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
10990 if (strcmp (name, "IWMMXT2") == 0)
10991 return bfd_mach_arm_iWMMXt2;
10993 if (strcmp (name, "IWMMXT") == 0)
10994 return bfd_mach_arm_iWMMXt;
10997 return bfd_mach_arm_5TE;
11001 return bfd_mach_arm_unknown;
11005 /* Set the right machine number. */
11008 elf32_arm_object_p (bfd *abfd)
11012 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
11014 if (mach == bfd_mach_arm_unknown)
11016 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
11017 mach = bfd_mach_arm_ep9312;
11019 mach = bfd_arm_get_mach_from_attributes (abfd);
11022 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
11026 /* Function to keep ARM specific flags in the ELF header. */
11029 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
11031 if (elf_flags_init (abfd)
11032 && elf_elfheader (abfd)->e_flags != flags)
11034 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
11036 if (flags & EF_ARM_INTERWORK)
11037 (*_bfd_error_handler)
11038 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
11042 (_("Warning: Clearing the interworking flag of %B due to outside request"),
11048 elf_elfheader (abfd)->e_flags = flags;
11049 elf_flags_init (abfd) = TRUE;
11055 /* Copy backend specific data from one object module to another. */
11058 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
11061 flagword out_flags;
11063 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
11066 in_flags = elf_elfheader (ibfd)->e_flags;
11067 out_flags = elf_elfheader (obfd)->e_flags;
11069 if (elf_flags_init (obfd)
11070 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
11071 && in_flags != out_flags)
11073 /* Cannot mix APCS26 and APCS32 code. */
11074 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
11077 /* Cannot mix float APCS and non-float APCS code. */
11078 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
11081 /* If the src and dest have different interworking flags
11082 then turn off the interworking bit. */
11083 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
11085 if (out_flags & EF_ARM_INTERWORK)
11087 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
11090 in_flags &= ~EF_ARM_INTERWORK;
11093 /* Likewise for PIC, though don't warn for this case. */
11094 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
11095 in_flags &= ~EF_ARM_PIC;
11098 elf_elfheader (obfd)->e_flags = in_flags;
11099 elf_flags_init (obfd) = TRUE;
11101 /* Also copy the EI_OSABI field. */
11102 elf_elfheader (obfd)->e_ident[EI_OSABI] =
11103 elf_elfheader (ibfd)->e_ident[EI_OSABI];
11105 /* Copy object attributes. */
11106 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11111 /* Values for Tag_ABI_PCS_R9_use. */
11120 /* Values for Tag_ABI_PCS_RW_data. */
11123 AEABI_PCS_RW_data_absolute,
11124 AEABI_PCS_RW_data_PCrel,
11125 AEABI_PCS_RW_data_SBrel,
11126 AEABI_PCS_RW_data_unused
11129 /* Values for Tag_ABI_enum_size. */
11135 AEABI_enum_forced_wide
11138 /* Determine whether an object attribute tag takes an integer, a string, or both. */
11142 elf32_arm_obj_attrs_arg_type (int tag)
11144 if (tag == Tag_compatibility)
11145 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
11146 else if (tag == Tag_nodefaults)
11147 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
11148 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
11149 return ATTR_TYPE_FLAG_STR_VAL;
11151 return ATTR_TYPE_FLAG_INT_VAL;
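/* Unknown tags numbered 32 and above follow the generic AEABI convention:
   odd-numbered tags take a string value, even-numbered tags an integer.  */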
11153 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
11156 /* The ABI defines that Tag_conformance should be emitted first, and that
11157 Tag_nodefaults should be second (if either is defined). This sets those
11158 two positions, and bumps up the position of all the remaining tags to accommodate them. */
11161 elf32_arm_obj_attrs_order (int num)
11163 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
11164 return Tag_conformance;
11165 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
11166 return Tag_nodefaults;
11167 if ((num - 2) < Tag_nodefaults)
11169 if ((num - 1) < Tag_conformance)
11174 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
11176 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
11178 if ((tag & 127) < 64)
11181 (_("%B: Unknown mandatory EABI object attribute %d"),
11183 bfd_set_error (bfd_error_bad_value);
11189 (_("Warning: %B: Unknown EABI object attribute %d"),
11195 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
11196 Returns -1 if no architecture could be read. */
11199 get_secondary_compatible_arch (bfd *abfd)
11201 obj_attribute *attr =
11202 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11204 /* Note: the tag and its argument below are uleb128 values, though
11205 currently-defined values fit in one byte for each. */
11207 && attr->s[0] == Tag_CPU_arch
11208 && (attr->s[1] & 128) != 128
11209 && attr->s[2] == 0)
11212 /* This tag is "safely ignorable", so don't complain if it looks funny. */
11216 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
11217 The tag is removed if ARCH is -1. */
11220 set_secondary_compatible_arch (bfd *abfd, int arch)
11222 obj_attribute *attr =
11223 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11231 /* Note: the tag and its argument below are uleb128 values, though
11232 currently-defined values fit in one byte for each. */
11234 attr->s = (char *) bfd_alloc (abfd, 3);
11235 attr->s[0] = Tag_CPU_arch;
11240 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags into account. */
11244 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
11245 int newtag, int secondary_compat)
11247 #define T(X) TAG_CPU_ARCH_##X
11248 int tagl, tagh, result;
11251 T(V6T2), /* PRE_V4. */
11253 T(V6T2), /* V4T. */
11254 T(V6T2), /* V5T. */
11255 T(V6T2), /* V5TE. */
11256 T(V6T2), /* V5TEJ. */
11259 T(V6T2) /* V6T2. */
11263 T(V6K), /* PRE_V4. */
11267 T(V6K), /* V5TE. */
11268 T(V6K), /* V5TEJ. */
11270 T(V6KZ), /* V6KZ. */
11276 T(V7), /* PRE_V4. */
11281 T(V7), /* V5TEJ. */
11294 T(V6K), /* V5TE. */
11295 T(V6K), /* V5TEJ. */
11297 T(V6KZ), /* V6KZ. */
11301 T(V6_M) /* V6_M. */
11303 const int v6s_m[] =
11309 T(V6K), /* V5TE. */
11310 T(V6K), /* V5TEJ. */
11312 T(V6KZ), /* V6KZ. */
11316 T(V6S_M), /* V6_M. */
11317 T(V6S_M) /* V6S_M. */
11319 const int v7e_m[] =
11323 T(V7E_M), /* V4T. */
11324 T(V7E_M), /* V5T. */
11325 T(V7E_M), /* V5TE. */
11326 T(V7E_M), /* V5TEJ. */
11327 T(V7E_M), /* V6. */
11328 T(V7E_M), /* V6KZ. */
11329 T(V7E_M), /* V6T2. */
11330 T(V7E_M), /* V6K. */
11331 T(V7E_M), /* V7. */
11332 T(V7E_M), /* V6_M. */
11333 T(V7E_M), /* V6S_M. */
11334 T(V7E_M) /* V7E_M. */
11338 T(V8), /* PRE_V4. */
11343 T(V8), /* V5TEJ. */
11350 T(V8), /* V6S_M. */
11351 T(V8), /* V7E_M. */
11354 const int v4t_plus_v6_m[] =
11360 T(V5TE), /* V5TE. */
11361 T(V5TEJ), /* V5TEJ. */
11363 T(V6KZ), /* V6KZ. */
11364 T(V6T2), /* V6T2. */
11367 T(V6_M), /* V6_M. */
11368 T(V6S_M), /* V6S_M. */
11369 T(V7E_M), /* V7E_M. */
11371 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
11373 const int *comb[] =
11382 /* Pseudo-architecture. */
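/* Each table above lists, for one possible "higher" tag (V6T2 upwards),
   the merged architecture for every "lower" tag; comb[] collects them so
   that the merge below is a single comb[tagh - V6T2][tagl] lookup.  */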
11386 /* Check we've not got a higher architecture than we know about. */
11388 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
11390 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
11394 /* Override old tag if we have a Tag_also_compatible_with on the output. */
11396 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
11397 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
11398 oldtag = T(V4T_PLUS_V6_M);
11400 /* And override the new tag if we have a Tag_also_compatible_with on the input. */
11403 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
11404 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
11405 newtag = T(V4T_PLUS_V6_M);
11407 tagl = (oldtag < newtag) ? oldtag : newtag;
11408 result = tagh = (oldtag > newtag) ? oldtag : newtag;
11410 /* Architectures before V6KZ add features monotonically. */
11411 if (tagh <= TAG_CPU_ARCH_V6KZ)
11414 result = comb[tagh - T(V6T2)][tagl];
11416 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
11417 as the canonical version. */
11418 if (result == T(V4T_PLUS_V6_M))
11421 *secondary_compat_out = T(V6_M);
11424 *secondary_compat_out = -1;
11428 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
11429 ibfd, oldtag, newtag);
11437 /* Query attributes object to see if integer divide instructions may be
11438 present in an object. */
11440 elf32_arm_attributes_accept_div (const obj_attribute *attr)
11442 int arch = attr[Tag_CPU_arch].i;
11443 int profile = attr[Tag_CPU_arch_profile].i;
11445 switch (attr[Tag_DIV_use].i)
11448 /* Integer divide allowed if instruction contained in architecture. */
11449 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
11451 else if (arch >= TAG_CPU_ARCH_V7E_M)
11457 /* Integer divide explicitly prohibited. */
11461 /* Unrecognised case - treat as allowing divide everywhere. */
11463 /* Integer divide allowed in ARM state. */
11468 /* Query attributes object to see if integer divide instructions are
11469 forbidden to be in the object. This is not the inverse of
11470 elf32_arm_attributes_accept_div. */
11472 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
11474 return attr[Tag_DIV_use].i == 1;
11477 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
11478 are conflicting attributes. */
11481 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
11483 obj_attribute *in_attr;
11484 obj_attribute *out_attr;
11485 /* Some tags have 0 = don't care, 1 = strong requirement,
11486 2 = weak requirement. */
11487 static const int order_021[3] = {0, 2, 1};
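/* For example, merging a weak requirement (2) with a strong one (1) keeps
   the 1, since order_021[1] == 2 is greater than order_021[2] == 1.  */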
11489 bfd_boolean result = TRUE;
11491 /* Skip the linker stubs file. This preserves previous behavior
11492 of accepting unknown attributes in the first input file - but is otherwise harmless. */
11494 if (ibfd->flags & BFD_LINKER_CREATED)
11497 if (!elf_known_obj_attributes_proc (obfd)[0].i)
11499 /* This is the first object. Copy the attributes. */
11500 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11502 out_attr = elf_known_obj_attributes_proc (obfd);
11504 /* Use the Tag_null value to indicate the attributes have been initialized. */
11508 /* We do not output objects with Tag_MPextension_use_legacy - we move
11509 the attribute's value to Tag_MPextension_use. */
11510 if (out_attr[Tag_MPextension_use_legacy].i != 0)
11512 if (out_attr[Tag_MPextension_use].i != 0
11513 && out_attr[Tag_MPextension_use_legacy].i
11514 != out_attr[Tag_MPextension_use].i)
11517 (_("Error: %B has both the current and legacy "
11518 "Tag_MPextension_use attributes"), ibfd);
11522 out_attr[Tag_MPextension_use] =
11523 out_attr[Tag_MPextension_use_legacy];
11524 out_attr[Tag_MPextension_use_legacy].type = 0;
11525 out_attr[Tag_MPextension_use_legacy].i = 0;
11531 in_attr = elf_known_obj_attributes_proc (ibfd);
11532 out_attr = elf_known_obj_attributes_proc (obfd);
11533 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
11534 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
11536 /* Ignore mismatches if the object doesn't use floating point. */
11537 if (out_attr[Tag_ABI_FP_number_model].i == 0)
11538 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
11539 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
11542 (_("error: %B uses VFP register arguments, %B does not"),
11543 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
11544 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
11549 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
11551 /* Merge this attribute with existing attributes. */
11554 case Tag_CPU_raw_name:
11556 /* These are merged after Tag_CPU_arch. */
11559 case Tag_ABI_optimization_goals:
11560 case Tag_ABI_FP_optimization_goals:
11561 /* Use the first value seen. */
11566 int secondary_compat = -1, secondary_compat_out = -1;
11567 unsigned int saved_out_attr = out_attr[i].i;
11568 static const char *name_table[] = {
11569 /* These aren't real CPU names, but we can't guess
11570 that from the architecture version alone. */
11587 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
11588 secondary_compat = get_secondary_compatible_arch (ibfd);
11589 secondary_compat_out = get_secondary_compatible_arch (obfd);
11590 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
11591 &secondary_compat_out,
11594 set_secondary_compatible_arch (obfd, secondary_compat_out);
11596 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
11597 if (out_attr[i].i == saved_out_attr)
11598 ; /* Leave the names alone. */
11599 else if (out_attr[i].i == in_attr[i].i)
11601 /* The output architecture has been changed to match the
11602 input architecture. Use the input names. */
11603 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
11604 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
11606 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
11607 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
11612 out_attr[Tag_CPU_name].s = NULL;
11613 out_attr[Tag_CPU_raw_name].s = NULL;
11616 /* If we still don't have a value for Tag_CPU_name,
11617 make one up now. Tag_CPU_raw_name remains blank. */
11618 if (out_attr[Tag_CPU_name].s == NULL
11619 && out_attr[i].i < ARRAY_SIZE (name_table))
11620 out_attr[Tag_CPU_name].s =
11621 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
11625 case Tag_ARM_ISA_use:
11626 case Tag_THUMB_ISA_use:
11627 case Tag_WMMX_arch:
11628 case Tag_Advanced_SIMD_arch:
11629 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
11630 case Tag_ABI_FP_rounding:
11631 case Tag_ABI_FP_exceptions:
11632 case Tag_ABI_FP_user_exceptions:
11633 case Tag_ABI_FP_number_model:
11634 case Tag_FP_HP_extension:
11635 case Tag_CPU_unaligned_access:
11637 case Tag_MPextension_use:
11638 /* Use the largest value specified. */
11639 if (in_attr[i].i > out_attr[i].i)
11640 out_attr[i].i = in_attr[i].i;
11643 case Tag_ABI_align_preserved:
11644 case Tag_ABI_PCS_RO_data:
11645 /* Use the smallest value specified. */
11646 if (in_attr[i].i < out_attr[i].i)
11647 out_attr[i].i = in_attr[i].i;
11650 case Tag_ABI_align_needed:
11651 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
11652 && (in_attr[Tag_ABI_align_preserved].i == 0
11653 || out_attr[Tag_ABI_align_preserved].i == 0))
11655 /* This error message should be enabled once all non-conformant
11656 binaries in the toolchain have had the attributes set
11659 (_("error: %B: 8-byte data alignment conflicts with %B"),
11663 /* Fall through. */
11664 case Tag_ABI_FP_denormal:
11665 case Tag_ABI_PCS_GOT_use:
11666 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
11667 value if greater than 2 (for future-proofing). */
11668 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
11669 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
11670 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
11671 out_attr[i].i = in_attr[i].i;
11674 case Tag_Virtualization_use:
11675 /* The virtualization tag effectively stores two bits of
11676 information: the intended use of TrustZone (in bit 0), and the
11677 intended use of Virtualization (in bit 1). */
11678 if (out_attr[i].i == 0)
11679 out_attr[i].i = in_attr[i].i;
11680 else if (in_attr[i].i != 0
11681 && in_attr[i].i != out_attr[i].i)
11683 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
11688 (_("error: %B: unable to merge virtualization attributes "
11696 case Tag_CPU_arch_profile:
11697 if (out_attr[i].i != in_attr[i].i)
11699 /* 0 will merge with anything.
11700 'A' and 'S' merge to 'A'.
11701 'R' and 'S' merge to 'R'.
11702 'M' and 'A|R|S' is an error. */
11703 if (out_attr[i].i == 0
11704 || (out_attr[i].i == 'S'
11705 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
11706 out_attr[i].i = in_attr[i].i;
11707 else if (in_attr[i].i == 0
11708 || (in_attr[i].i == 'S'
11709 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
11710 ; /* Do nothing. */
11714 (_("error: %B: Conflicting architecture profiles %c/%c"),
11716 in_attr[i].i ? in_attr[i].i : '0',
11717 out_attr[i].i ? out_attr[i].i : '0');
11724 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
11725 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
11726 when it's 0. It might mean absence of FP hardware if
11727 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
11729 #define VFP_VERSION_COUNT 8
11730 static const struct
11734 } vfp_versions[VFP_VERSION_COUNT] =
11749 /* If the output has no requirement about FP hardware,
11750 follow the requirement of the input. */
11751 if (out_attr[i].i == 0)
11753 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
11754 out_attr[i].i = in_attr[i].i;
11755 out_attr[Tag_ABI_HardFP_use].i
11756 = in_attr[Tag_ABI_HardFP_use].i;
11759 /* If the input has no requirement about FP hardware, do nothing. */
11761 else if (in_attr[i].i == 0)
11763 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
11767 /* Both the input and the output have nonzero Tag_FP_arch.
11768 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
11770 /* If both the input and the output have zero Tag_ABI_HardFP_use, do nothing. */
11772 if (in_attr[Tag_ABI_HardFP_use].i == 0
11773 && out_attr[Tag_ABI_HardFP_use].i == 0)
11775 /* If the input and the output have different Tag_ABI_HardFP_use,
11776 the combination of them is 3 (SP & DP). */
11777 else if (in_attr[Tag_ABI_HardFP_use].i
11778 != out_attr[Tag_ABI_HardFP_use].i)
11779 out_attr[Tag_ABI_HardFP_use].i = 3;
11781 /* Now we can handle Tag_FP_arch. */
11783 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
11784 pick the biggest. */
11785 if (in_attr[i].i >= VFP_VERSION_COUNT
11786 && in_attr[i].i > out_attr[i].i)
11788 out_attr[i] = in_attr[i];
11791 /* The output uses the superset of input features
11792 (ISA version) and registers. */
11793 ver = vfp_versions[in_attr[i].i].ver;
11794 if (ver < vfp_versions[out_attr[i].i].ver)
11795 ver = vfp_versions[out_attr[i].i].ver;
11796 regs = vfp_versions[in_attr[i].i].regs;
11797 if (regs < vfp_versions[out_attr[i].i].regs)
11798 regs = vfp_versions[out_attr[i].i].regs;
11799 /* This assumes all possible supersets are also a valid version. */
11801 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
11803 if (regs == vfp_versions[newval].regs
11804 && ver == vfp_versions[newval].ver)
11807 out_attr[i].i = newval;
11810 case Tag_PCS_config:
11811 if (out_attr[i].i == 0)
11812 out_attr[i].i = in_attr[i].i;
11813 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
11815 /* It's sometimes ok to mix different configs, so this is only a warning. */
11818 (_("Warning: %B: Conflicting platform configuration"), ibfd);
11821 case Tag_ABI_PCS_R9_use:
11822 if (in_attr[i].i != out_attr[i].i
11823 && out_attr[i].i != AEABI_R9_unused
11824 && in_attr[i].i != AEABI_R9_unused)
11827 (_("error: %B: Conflicting use of R9"), ibfd);
11830 if (out_attr[i].i == AEABI_R9_unused)
11831 out_attr[i].i = in_attr[i].i;
11833 case Tag_ABI_PCS_RW_data:
11834 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
11835 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
11836 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
11839 (_("error: %B: SB relative addressing conflicts with use of R9"),
11843 /* Use the smallest value specified. */
11844 if (in_attr[i].i < out_attr[i].i)
11845 out_attr[i].i = in_attr[i].i;
11847 case Tag_ABI_PCS_wchar_t:
11848 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
11849 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
11852 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
11853 ibfd, in_attr[i].i, out_attr[i].i);
11855 else if (in_attr[i].i && !out_attr[i].i)
11856 out_attr[i].i = in_attr[i].i;
11858 case Tag_ABI_enum_size:
11859 if (in_attr[i].i != AEABI_enum_unused)
11861 if (out_attr[i].i == AEABI_enum_unused
11862 || out_attr[i].i == AEABI_enum_forced_wide)
11864 /* The existing object is compatible with anything.
11865 Use whatever requirements the new object has. */
11866 out_attr[i].i = in_attr[i].i;
11868 else if (in_attr[i].i != AEABI_enum_forced_wide
11869 && out_attr[i].i != in_attr[i].i
11870 && !elf_arm_tdata (obfd)->no_enum_size_warning)
11872 static const char *aeabi_enum_names[] =
11873 { "", "variable-size", "32-bit", "" };
11874 const char *in_name =
11875 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11876 ? aeabi_enum_names[in_attr[i].i]
11878 const char *out_name =
11879 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11880 ? aeabi_enum_names[out_attr[i].i]
11883 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
11884 ibfd, in_name, out_name);
11888 case Tag_ABI_VFP_args:
11891 case Tag_ABI_WMMX_args:
11892 if (in_attr[i].i != out_attr[i].i)
11895 (_("error: %B uses iWMMXt register arguments, %B does not"),
11900 case Tag_compatibility:
11901 /* Merged in target-independent code. */
11903 case Tag_ABI_HardFP_use:
11904 /* This is handled along with Tag_FP_arch. */
11906 case Tag_ABI_FP_16bit_format:
11907 if (in_attr[i].i != 0 && out_attr[i].i != 0)
11909 if (in_attr[i].i != out_attr[i].i)
11912 (_("error: fp16 format mismatch between %B and %B"),
11917 if (in_attr[i].i != 0)
11918 out_attr[i].i = in_attr[i].i;
11922 /* A value of zero on input means that the divide instruction may
11923 be used if available in the base architecture as specified via
11924 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
11925 the user did not want divide instructions. A value of 2
11926 explicitly means that divide instructions were allowed in ARM
11927 and Thumb state. */
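/* In short: an explicit prohibition is honoured only if the other object
   does not already allow divide through its architecture, an explicit
   permission propagates to the output, and otherwise the output value is
   left unchanged.  */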
11928 if (in_attr[i].i == out_attr[i].i)
11929 /* Do nothing. */ ;
11930 else if (elf32_arm_attributes_forbid_div (in_attr)
11931 && !elf32_arm_attributes_accept_div (out_attr))
11933 else if (elf32_arm_attributes_forbid_div (out_attr)
11934 && elf32_arm_attributes_accept_div (in_attr))
11935 out_attr[i].i = in_attr[i].i;
11936 else if (in_attr[i].i == 2)
11937 out_attr[i].i = in_attr[i].i;
11940 case Tag_MPextension_use_legacy:
11941 /* We don't output objects with Tag_MPextension_use_legacy - we
11942 move the value to Tag_MPextension_use. */
11943 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
11945 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
11948 (_("%B has both the current and legacy "
11949 "Tag_MPextension_use attributes"),
11955 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
11956 out_attr[Tag_MPextension_use] = in_attr[i];
11960 case Tag_nodefaults:
11961 /* This tag is set if it exists, but the value is unused (and is
11962 typically zero). We don't actually need to do anything here -
11963 the merge happens automatically when the type flags are merged below. */
11966 case Tag_also_compatible_with:
11967 /* Already done in Tag_CPU_arch. */
11969 case Tag_conformance:
11970 /* Keep the attribute if it matches. Throw it away otherwise.
11971 No attribute means no claim to conform. */
11972 if (!in_attr[i].s || !out_attr[i].s
11973 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
11974 out_attr[i].s = NULL;
11979 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
11982 /* If out_attr was copied from in_attr then it won't have a type yet. */
11983 if (in_attr[i].type && !out_attr[i].type)
11984 out_attr[i].type = in_attr[i].type;
11987 /* Merge Tag_compatibility attributes and any common GNU ones. */
11988 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
11991 /* Check for any attributes not known on ARM. */
11992 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
11998 /* Return TRUE if the two EABI versions are compatible with each other. */
12001 elf32_arm_versions_compatible (unsigned iver, unsigned over)
12003 /* v4 and v5 are the same spec before and after it was released,
12004 so allow mixing them. */
12005 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
12006 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
12009 return (iver == over);
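/* For instance, VER4 and VER5 objects are considered compatible with each
   other, while VER2 and VER5 objects are not.  */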
12012 /* Merge backend specific data from an object file to the output
12013 object file when linking. */
12016 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
12018 /* Display the flags field. */
12021 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
12023 FILE * file = (FILE *) ptr;
12024 unsigned long flags;
12026 BFD_ASSERT (abfd != NULL && ptr != NULL);
12028 /* Print normal ELF private data. */
12029 _bfd_elf_print_private_bfd_data (abfd, ptr);
12031 flags = elf_elfheader (abfd)->e_flags;
12032 /* Ignore init flag - it may not be set, despite the flags field
12033 containing valid data. */
12035 /* xgettext:c-format */
12036 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
12038 switch (EF_ARM_EABI_VERSION (flags))
12040 case EF_ARM_EABI_UNKNOWN:
12041 /* The following flag bits are GNU extensions and not part of the
12042 official ARM ELF extended ABI. Hence they are only decoded if
12043 the EABI version is not set. */
12044 if (flags & EF_ARM_INTERWORK)
12045 fprintf (file, _(" [interworking enabled]"));
12047 if (flags & EF_ARM_APCS_26)
12048 fprintf (file, " [APCS-26]");
12050 fprintf (file, " [APCS-32]");
12052 if (flags & EF_ARM_VFP_FLOAT)
12053 fprintf (file, _(" [VFP float format]"));
12054 else if (flags & EF_ARM_MAVERICK_FLOAT)
12055 fprintf (file, _(" [Maverick float format]"));
12057 fprintf (file, _(" [FPA float format]"));
12059 if (flags & EF_ARM_APCS_FLOAT)
12060 fprintf (file, _(" [floats passed in float registers]"));
12062 if (flags & EF_ARM_PIC)
12063 fprintf (file, _(" [position independent]"));
12065 if (flags & EF_ARM_NEW_ABI)
12066 fprintf (file, _(" [new ABI]"));
12068 if (flags & EF_ARM_OLD_ABI)
12069 fprintf (file, _(" [old ABI]"));
12071 if (flags & EF_ARM_SOFT_FLOAT)
12072 fprintf (file, _(" [software FP]"));
12074 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
12075 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
12076 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
12077 | EF_ARM_MAVERICK_FLOAT);
12080 case EF_ARM_EABI_VER1:
12081 fprintf (file, _(" [Version1 EABI]"));
12083 if (flags & EF_ARM_SYMSARESORTED)
12084 fprintf (file, _(" [sorted symbol table]"));
12086 fprintf (file, _(" [unsorted symbol table]"));
12088 flags &= ~ EF_ARM_SYMSARESORTED;
12091 case EF_ARM_EABI_VER2:
12092 fprintf (file, _(" [Version2 EABI]"));
12094 if (flags & EF_ARM_SYMSARESORTED)
12095 fprintf (file, _(" [sorted symbol table]"));
12097 fprintf (file, _(" [unsorted symbol table]"));
12099 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
12100 fprintf (file, _(" [dynamic symbols use segment index]"));
12102 if (flags & EF_ARM_MAPSYMSFIRST)
12103 fprintf (file, _(" [mapping symbols precede others]"));
12105 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
12106 | EF_ARM_MAPSYMSFIRST);
12109 case EF_ARM_EABI_VER3:
12110 fprintf (file, _(" [Version3 EABI]"));
12113 case EF_ARM_EABI_VER4:
12114 fprintf (file, _(" [Version4 EABI]"));
12117 case EF_ARM_EABI_VER5:
12118 fprintf (file, _(" [Version5 EABI]"));
12120 if (flags & EF_ARM_ABI_FLOAT_SOFT)
12121 fprintf (file, _(" [soft-float ABI]"));
12123 if (flags & EF_ARM_ABI_FLOAT_HARD)
12124 fprintf (file, _(" [hard-float ABI]"));
12126 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
12129 if (flags & EF_ARM_BE8)
12130 fprintf (file, _(" [BE8]"));
12132 if (flags & EF_ARM_LE8)
12133 fprintf (file, _(" [LE8]"));
12135 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
12139 fprintf (file, _(" <EABI version unrecognised>"));
12143 flags &= ~ EF_ARM_EABIMASK;
12145 if (flags & EF_ARM_RELEXEC)
12146 fprintf (file, _(" [relocatable executable]"));
12148 if (flags & EF_ARM_HASENTRY)
12149 fprintf (file, _(" [has entry point]"));
12151 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
12154 fprintf (file, _("<Unrecognised flag bits set>"));
12156 fputc ('\n', file);
12162 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
12164 switch (ELF_ST_TYPE (elf_sym->st_info))
12166 case STT_ARM_TFUNC:
12167 return ELF_ST_TYPE (elf_sym->st_info);
12169 case STT_ARM_16BIT:
12170 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
12171 This allows us to distinguish between data used by Thumb instructions
12172 and non-data (which is probably code) inside Thumb regions of an executable. */
12174 if (type != STT_OBJECT && type != STT_TLS)
12175 return ELF_ST_TYPE (elf_sym->st_info);
12186 elf32_arm_gc_mark_hook (asection *sec,
12187 struct bfd_link_info *info,
12188 Elf_Internal_Rela *rel,
12189 struct elf_link_hash_entry *h,
12190 Elf_Internal_Sym *sym)
12193 switch (ELF32_R_TYPE (rel->r_info))
12195 case R_ARM_GNU_VTINHERIT:
12196 case R_ARM_GNU_VTENTRY:
12200 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
12203 /* Update the got entry reference counts for the section being removed. */
12206 elf32_arm_gc_sweep_hook (bfd * abfd,
12207 struct bfd_link_info * info,
12209 const Elf_Internal_Rela * relocs)
12211 Elf_Internal_Shdr *symtab_hdr;
12212 struct elf_link_hash_entry **sym_hashes;
12213 bfd_signed_vma *local_got_refcounts;
12214 const Elf_Internal_Rela *rel, *relend;
12215 struct elf32_arm_link_hash_table * globals;
12217 if (info->relocatable)
12220 globals = elf32_arm_hash_table (info);
12221 if (globals == NULL)
12224 elf_section_data (sec)->local_dynrel = NULL;
12226 symtab_hdr = & elf_symtab_hdr (abfd);
12227 sym_hashes = elf_sym_hashes (abfd);
12228 local_got_refcounts = elf_local_got_refcounts (abfd);
12230 check_use_blx (globals);
12232 relend = relocs + sec->reloc_count;
12233 for (rel = relocs; rel < relend; rel++)
12235 unsigned long r_symndx;
12236 struct elf_link_hash_entry *h = NULL;
12237 struct elf32_arm_link_hash_entry *eh;
12239 bfd_boolean call_reloc_p;
12240 bfd_boolean may_become_dynamic_p;
12241 bfd_boolean may_need_local_target_p;
12242 union gotplt_union *root_plt;
12243 struct arm_plt_info *arm_plt;
12245 r_symndx = ELF32_R_SYM (rel->r_info);
12246 if (r_symndx >= symtab_hdr->sh_info)
12248 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12249 while (h->root.type == bfd_link_hash_indirect
12250 || h->root.type == bfd_link_hash_warning)
12251 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12253 eh = (struct elf32_arm_link_hash_entry *) h;
12255 call_reloc_p = FALSE;
12256 may_become_dynamic_p = FALSE;
12257 may_need_local_target_p = FALSE;
12259 r_type = ELF32_R_TYPE (rel->r_info);
12260 r_type = arm_real_reloc_type (globals, r_type);
12264 case R_ARM_GOT_PREL:
12265 case R_ARM_TLS_GD32:
12266 case R_ARM_TLS_IE32:
12269 if (h->got.refcount > 0)
12270 h->got.refcount -= 1;
12272 else if (local_got_refcounts != NULL)
12274 if (local_got_refcounts[r_symndx] > 0)
12275 local_got_refcounts[r_symndx] -= 1;
12279 case R_ARM_TLS_LDM32:
12280 globals->tls_ldm_got.refcount -= 1;
12288 case R_ARM_THM_CALL:
12289 case R_ARM_THM_JUMP24:
12290 case R_ARM_THM_JUMP19:
12291 call_reloc_p = TRUE;
12292 may_need_local_target_p = TRUE;
12296 if (!globals->vxworks_p)
12298 may_need_local_target_p = TRUE;
12301 /* Fall through. */
12303 case R_ARM_ABS32_NOI:
12305 case R_ARM_REL32_NOI:
12306 case R_ARM_MOVW_ABS_NC:
12307 case R_ARM_MOVT_ABS:
12308 case R_ARM_MOVW_PREL_NC:
12309 case R_ARM_MOVT_PREL:
12310 case R_ARM_THM_MOVW_ABS_NC:
12311 case R_ARM_THM_MOVT_ABS:
12312 case R_ARM_THM_MOVW_PREL_NC:
12313 case R_ARM_THM_MOVT_PREL:
12314 /* Should the interworking branches be here also? */
12315 if ((info->shared || globals->root.is_relocatable_executable)
12316 && (sec->flags & SEC_ALLOC) != 0)
12319 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12321 call_reloc_p = TRUE;
12322 may_need_local_target_p = TRUE;
12325 may_become_dynamic_p = TRUE;
12328 may_need_local_target_p = TRUE;
12335 if (may_need_local_target_p
12336 && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
12338 /* If PLT refcount book-keeping is wrong and too low, we'll
12339 see a zero value (going to -1) for the root PLT reference count. */
12341 if (root_plt->refcount >= 0)
12343 BFD_ASSERT (root_plt->refcount != 0);
12344 root_plt->refcount -= 1;
12347 /* A value of -1 means the symbol has become local, forced
12348 or seeing a hidden definition. Any other negative value is an error. */
12350 BFD_ASSERT (root_plt->refcount == -1);
12353 arm_plt->noncall_refcount--;
12355 if (r_type == R_ARM_THM_CALL)
12356 arm_plt->maybe_thumb_refcount--;
12358 if (r_type == R_ARM_THM_JUMP24
12359 || r_type == R_ARM_THM_JUMP19)
12360 arm_plt->thumb_refcount--;
12363 if (may_become_dynamic_p)
12365 struct elf_dyn_relocs **pp;
12366 struct elf_dyn_relocs *p;
12369 pp = &(eh->dyn_relocs);
12372 Elf_Internal_Sym *isym;
12374 isym = bfd_sym_from_r_symndx (&globals->sym_cache,
12378 pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12382 for (; (p = *pp) != NULL; pp = &p->next)
12385 /* Everything must go for SEC. */
12395 /* Look through the relocs for a section during the first phase. */
12398 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
12399 asection *sec, const Elf_Internal_Rela *relocs)
12401 Elf_Internal_Shdr *symtab_hdr;
12402 struct elf_link_hash_entry **sym_hashes;
12403 const Elf_Internal_Rela *rel;
12404 const Elf_Internal_Rela *rel_end;
12407 struct elf32_arm_link_hash_table *htab;
12408 bfd_boolean call_reloc_p;
12409 bfd_boolean may_become_dynamic_p;
12410 bfd_boolean may_need_local_target_p;
12411 unsigned long nsyms;
12413 if (info->relocatable)
12416 BFD_ASSERT (is_arm_elf (abfd));
12418 htab = elf32_arm_hash_table (info);
12424 /* Create dynamic sections for relocatable executables so that we can
12425 copy relocations. */
12426 if (htab->root.is_relocatable_executable
12427 && ! htab->root.dynamic_sections_created)
12429 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
12433 if (htab->root.dynobj == NULL)
12434 htab->root.dynobj = abfd;
12435 if (!create_ifunc_sections (info))
12438 dynobj = htab->root.dynobj;
12440 symtab_hdr = & elf_symtab_hdr (abfd);
12441 sym_hashes = elf_sym_hashes (abfd);
12442 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
12444 rel_end = relocs + sec->reloc_count;
12445 for (rel = relocs; rel < rel_end; rel++)
12447 Elf_Internal_Sym *isym;
12448 struct elf_link_hash_entry *h;
12449 struct elf32_arm_link_hash_entry *eh;
12450 unsigned long r_symndx;
12453 r_symndx = ELF32_R_SYM (rel->r_info);
12454 r_type = ELF32_R_TYPE (rel->r_info);
12455 r_type = arm_real_reloc_type (htab, r_type);
12457 if (r_symndx >= nsyms
12458 /* PR 9934: It is possible to have relocations that do not
12459 refer to symbols, thus it is also possible to have an
12460 object file containing relocations but no symbol table. */
12461 && (r_symndx > STN_UNDEF || nsyms > 0))
12463 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
12472 if (r_symndx < symtab_hdr->sh_info)
12474 /* A local symbol. */
12475 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
12482 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12483 while (h->root.type == bfd_link_hash_indirect
12484 || h->root.type == bfd_link_hash_warning)
12485 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12489 eh = (struct elf32_arm_link_hash_entry *) h;
12491 call_reloc_p = FALSE;
12492 may_become_dynamic_p = FALSE;
12493 may_need_local_target_p = FALSE;
12495 /* Could be done earlier, if h were already available. */
12496 r_type = elf32_arm_tls_transition (info, r_type, h);
12500 case R_ARM_GOT_PREL:
12501 case R_ARM_TLS_GD32:
12502 case R_ARM_TLS_IE32:
12503 case R_ARM_TLS_GOTDESC:
12504 case R_ARM_TLS_DESCSEQ:
12505 case R_ARM_THM_TLS_DESCSEQ:
12506 case R_ARM_TLS_CALL:
12507 case R_ARM_THM_TLS_CALL:
12508 /* This symbol requires a global offset table entry. */
12510 int tls_type, old_tls_type;
12514 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
12516 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
12518 case R_ARM_TLS_GOTDESC:
12519 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
12520 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
12521 tls_type = GOT_TLS_GDESC; break;
12523 default: tls_type = GOT_NORMAL; break;
12529 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
12533 /* This is a global offset table entry for a local symbol. */
12534 if (!elf32_arm_allocate_local_sym_info (abfd))
12536 elf_local_got_refcounts (abfd)[r_symndx] += 1;
12537 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
12540 /* If a variable is accessed with both tls methods, two
12541 slots may be created. */
12542 if (GOT_TLS_GD_ANY_P (old_tls_type)
12543 && GOT_TLS_GD_ANY_P (tls_type))
12544 tls_type |= old_tls_type;
12546 /* We will already have issued an error message if there
12547 is a TLS/non-TLS mismatch, based on the symbol
12548 type. So just combine any TLS types needed. */
12549 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
12550 && tls_type != GOT_NORMAL)
12551 tls_type |= old_tls_type;
12553 /* If the symbol is accessed in both IE and GDESC
12554 method, we're able to relax. Turn off the GDESC flag,
12555 without disturbing any other kind of TLS type that may be
12556 involved. */
12557 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
12558 tls_type &= ~GOT_TLS_GDESC;
12560 if (old_tls_type != tls_type)
12563 elf32_arm_hash_entry (h)->tls_type = tls_type;
12565 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
12568 /* Fall through. */
12570 case R_ARM_TLS_LDM32:
12571 if (r_type == R_ARM_TLS_LDM32)
12572 htab->tls_ldm_got.refcount++;
12573 /* Fall through. */
12575 case R_ARM_GOTOFF32:
12577 if (htab->root.sgot == NULL
12578 && !create_got_section (htab->root.dynobj, info))
12587 case R_ARM_THM_CALL:
12588 case R_ARM_THM_JUMP24:
12589 case R_ARM_THM_JUMP19:
12590 call_reloc_p = TRUE;
12591 may_need_local_target_p = TRUE;
12595 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
12596 ldr __GOTT_INDEX__ offsets. */
12597 if (!htab->vxworks_p)
12599 may_need_local_target_p = TRUE;
12602 /* Fall through. */
12604 case R_ARM_MOVW_ABS_NC:
12605 case R_ARM_MOVT_ABS:
12606 case R_ARM_THM_MOVW_ABS_NC:
12607 case R_ARM_THM_MOVT_ABS:
12610 (*_bfd_error_handler)
12611 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
12612 abfd, elf32_arm_howto_table_1[r_type].name,
12613 (h) ? h->root.root.string : "a local symbol");
12614 bfd_set_error (bfd_error_bad_value);
12618 /* Fall through. */
12620 case R_ARM_ABS32_NOI:
12622 case R_ARM_REL32_NOI:
12623 case R_ARM_MOVW_PREL_NC:
12624 case R_ARM_MOVT_PREL:
12625 case R_ARM_THM_MOVW_PREL_NC:
12626 case R_ARM_THM_MOVT_PREL:
12628 /* Should the interworking branches be listed here? */
12629 if ((info->shared || htab->root.is_relocatable_executable)
12630 && (sec->flags & SEC_ALLOC) != 0)
12633 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12635 /* In shared libraries and relocatable executables,
12636 we treat local relative references as calls;
12637 see the related SYMBOL_CALLS_LOCAL code in
12638 allocate_dynrelocs. */
12639 call_reloc_p = TRUE;
12640 may_need_local_target_p = TRUE;
12643 /* We are creating a shared library or relocatable
12644 executable, and this is a reloc against a global symbol,
12645 or a non-PC-relative reloc against a local symbol.
12646 We may need to copy the reloc into the output. */
12647 may_become_dynamic_p = TRUE;
12650 may_need_local_target_p = TRUE;
12653 /* This relocation describes the C++ object vtable hierarchy.
12654 Reconstruct it for later use during GC. */
12655 case R_ARM_GNU_VTINHERIT:
12656 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
12660 /* This relocation describes which C++ vtable entries are actually
12661 used. Record for later use during GC. */
12662 case R_ARM_GNU_VTENTRY:
12663 BFD_ASSERT (h != NULL);
12665 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
12673 /* We may need a .plt entry if the function this reloc
12674 refers to is in a different object, regardless of the
12675 symbol's type. We can't tell for sure yet, because
12676 something later might force the symbol local. */
12678 else if (may_need_local_target_p)
12679 /* If this reloc is in a read-only section, we might
12680 need a copy reloc. We can't check reliably at this
12681 stage whether the section is read-only, as input
12682 sections have not yet been mapped to output sections.
12683 Tentatively set the flag for now, and correct in
12684 adjust_dynamic_symbol. */
12685 h->non_got_ref = 1;
12688 if (may_need_local_target_p
12689 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
12691 union gotplt_union *root_plt;
12692 struct arm_plt_info *arm_plt;
12693 struct arm_local_iplt_info *local_iplt;
12697 root_plt = &h->plt;
12698 arm_plt = &eh->plt;
12702 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
12703 if (local_iplt == NULL)
12705 root_plt = &local_iplt->root;
12706 arm_plt = &local_iplt->arm;
12709 /* If the symbol is a function that doesn't bind locally,
12710 this relocation will need a PLT entry. */
12711 if (root_plt->refcount != -1)
12712 root_plt->refcount += 1;
12715 arm_plt->noncall_refcount++;
12717 /* It's too early to use htab->use_blx here, so we have to
12718 record possible blx references separately from
12719 relocs that definitely need a thumb stub. */
12721 if (r_type == R_ARM_THM_CALL)
12722 arm_plt->maybe_thumb_refcount += 1;
12724 if (r_type == R_ARM_THM_JUMP24
12725 || r_type == R_ARM_THM_JUMP19)
12726 arm_plt->thumb_refcount += 1;
12729 if (may_become_dynamic_p)
12731 struct elf_dyn_relocs *p, **head;
12733 /* Create a reloc section in dynobj. */
12734 if (sreloc == NULL)
12736 sreloc = _bfd_elf_make_dynamic_reloc_section
12737 (sec, dynobj, 2, abfd, ! htab->use_rel);
12739 if (sreloc == NULL)
12742 /* BPABI objects never have dynamic relocations mapped. */
12743 if (htab->symbian_p)
12747 flags = bfd_get_section_flags (dynobj, sreloc);
12748 flags &= ~(SEC_LOAD | SEC_ALLOC);
12749 bfd_set_section_flags (dynobj, sreloc, flags);
12753 /* If this is a global symbol, count the number of
12754 relocations we need for this symbol. */
12756 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
12759 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12765 if (p == NULL || p->sec != sec)
12767 bfd_size_type amt = sizeof *p;
12769 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
12779 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
12788 /* Unwinding tables are not referenced directly. This pass marks them as
12789 required if the corresponding code section is marked. */
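/* (An SHT_ARM_EXIDX section's sh_link field identifies the text section
   it unwinds, so an EXIDX section is marked whenever the section named by
   its sh_link has been marked.)  */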
12792 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
12793 elf_gc_mark_hook_fn gc_mark_hook)
12796 Elf_Internal_Shdr **elf_shdrp;
12799 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
12801 /* Marking EH data may cause additional code sections to be marked,
12802 requiring multiple passes. */
12807 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
12811 if (! is_arm_elf (sub))
12814 elf_shdrp = elf_elfsections (sub);
12815 for (o = sub->sections; o != NULL; o = o->next)
12817 Elf_Internal_Shdr *hdr;
12819 hdr = &elf_section_data (o)->this_hdr;
12820 if (hdr->sh_type == SHT_ARM_EXIDX
12822 && hdr->sh_link < elf_numsections (sub)
12824 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
12827 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
12837 /* Treat mapping symbols as special target symbols. */
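/* (ARM mapping symbols are the local $a, $t and $d symbols that mark the
   start of ARM code, Thumb code and data within a section.)  */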
12840 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
12842 return bfd_is_arm_special_symbol_name (sym->name,
12843 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
12846 /* This is a copy of elf_find_function() from elf.c except that
12847 ARM mapping symbols are ignored when looking for function names
12848 and STT_ARM_TFUNC is considered to be a function type. */
12851 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
12852 asection * section,
12853 asymbol ** symbols,
12855 const char ** filename_ptr,
12856 const char ** functionname_ptr)
12858 const char * filename = NULL;
12859 asymbol * func = NULL;
12860 bfd_vma low_func = 0;
12863 for (p = symbols; *p != NULL; p++)
12865 elf_symbol_type *q;
12867 q = (elf_symbol_type *) *p;
12869 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
12874 filename = bfd_asymbol_name (&q->symbol);
12877 case STT_ARM_TFUNC:
12879 /* Skip mapping symbols. */
12880 if ((q->symbol.flags & BSF_LOCAL)
12881 && bfd_is_arm_special_symbol_name (q->symbol.name,
12882 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
12884 /* Fall through. */
12885 if (bfd_get_section (&q->symbol) == section
12886 && q->symbol.value >= low_func
12887 && q->symbol.value <= offset)
12889 func = (asymbol *) q;
12890 low_func = q->symbol.value;
12900 *filename_ptr = filename;
12901 if (functionname_ptr)
12902 *functionname_ptr = bfd_asymbol_name (func);
12908 /* Find the nearest line to a particular section and offset, for error
12909 reporting. This code is a duplicate of the code in elf.c, except
12910 that it uses arm_elf_find_function. */
12913 elf32_arm_find_nearest_line (bfd * abfd,
12914 asection * section,
12915 asymbol ** symbols,
12917 const char ** filename_ptr,
12918 const char ** functionname_ptr,
12919 unsigned int * line_ptr)
12921 bfd_boolean found = FALSE;
12923 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
12925 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
12926 section, symbols, offset,
12927 filename_ptr, functionname_ptr,
12929 & elf_tdata (abfd)->dwarf2_find_line_info))
12931 if (!*functionname_ptr)
12932 arm_elf_find_function (abfd, section, symbols, offset,
12933 *filename_ptr ? NULL : filename_ptr,
12939 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
12940 & found, filename_ptr,
12941 functionname_ptr, line_ptr,
12942 & elf_tdata (abfd)->line_info))
12945 if (found && (*functionname_ptr || *line_ptr))
12948 if (symbols == NULL)
12951 if (! arm_elf_find_function (abfd, section, symbols, offset,
12952 filename_ptr, functionname_ptr))
12960 elf32_arm_find_inliner_info (bfd * abfd,
12961 const char ** filename_ptr,
12962 const char ** functionname_ptr,
12963 unsigned int * line_ptr)
12966 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
12967 functionname_ptr, line_ptr,
12968 & elf_tdata (abfd)->dwarf2_find_line_info);
12972 /* Adjust a symbol defined by a dynamic object and referenced by a
12973 regular object. The current definition is in some section of the
12974 dynamic object, but we're not including those sections. We have to
12975 change the definition to something the rest of the link can understand. */
12979 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
12980 struct elf_link_hash_entry * h)
12984 struct elf32_arm_link_hash_entry * eh;
12985 struct elf32_arm_link_hash_table *globals;
12987 globals = elf32_arm_hash_table (info);
12988 if (globals == NULL)
12991 dynobj = elf_hash_table (info)->dynobj;
12993 /* Make sure we know what is going on here. */
12994 BFD_ASSERT (dynobj != NULL
12996 || h->type == STT_GNU_IFUNC
12997 || h->u.weakdef != NULL
13000 && !h->def_regular)));
13002 eh = (struct elf32_arm_link_hash_entry *) h;
13004 /* If this is a function, put it in the procedure linkage table. We
13005 will fill in the contents of the procedure linkage table later,
13006 when we know the address of the .got section. */
13007 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
13009 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
13010 symbol binds locally. */
13011 if (h->plt.refcount <= 0
13012 || (h->type != STT_GNU_IFUNC
13013 && (SYMBOL_CALLS_LOCAL (info, h)
13014 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
13015 && h->root.type == bfd_link_hash_undefweak))))
13017 /* This case can occur if we saw a PLT32 reloc in an input
13018 file, but the symbol was never referred to by a dynamic
13019 object, or if all references were garbage collected. In
13020 such a case, we don't actually need to build a procedure
13021 linkage table, and we can just do a PC24 reloc instead. */
13022 h->plt.offset = (bfd_vma) -1;
13023 eh->plt.thumb_refcount = 0;
13024 eh->plt.maybe_thumb_refcount = 0;
13025 eh->plt.noncall_refcount = 0;
13033 /* It's possible that we incorrectly decided a .plt reloc was
13034 needed for an R_ARM_PC24 or similar reloc to a non-function sym
13035 in check_relocs. We can't decide accurately between function
13036 and non-function syms in check-relocs; Objects loaded later in
13037 the link may change h->type. So fix it now. */
13038 h->plt.offset = (bfd_vma) -1;
13039 eh->plt.thumb_refcount = 0;
13040 eh->plt.maybe_thumb_refcount = 0;
13041 eh->plt.noncall_refcount = 0;
13044 /* If this is a weak symbol, and there is a real definition, the
13045 processor independent code will have arranged for us to see the
13046 real definition first, and we can just use the same value. */
13047 if (h->u.weakdef != NULL)
13049 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
13050 || h->u.weakdef->root.type == bfd_link_hash_defweak);
13051 h->root.u.def.section = h->u.weakdef->root.u.def.section;
13052 h->root.u.def.value = h->u.weakdef->root.u.def.value;
13056 /* If there are no non-GOT references, we do not need a copy reloc. */
13058 if (!h->non_got_ref)
13061 /* This is a reference to a symbol defined by a dynamic object which
13062 is not a function. */
13064 /* If we are creating a shared library, we must presume that the
13065 only references to the symbol are via the global offset table.
13066 For such cases we need not do anything here; the relocations will
13067 be handled correctly by relocate_section. Relocatable executables
13068 can reference data in shared objects directly, so we don't need to
13069 do anything here. */
13070 if (info->shared || globals->root.is_relocatable_executable)
13073 /* We must allocate the symbol in our .dynbss section, which will
13074 become part of the .bss section of the executable. There will be
13075 an entry for this symbol in the .dynsym section. The dynamic
13076 object will contain position independent code, so all references
13077 from the dynamic object to this symbol will go through the global
13078 offset table. The dynamic linker will use the .dynsym entry to
13079 determine the address it must put in the global offset table, so
13080 both the dynamic object and the regular object will refer to the
13081 same memory location for the variable. */
13082 s = bfd_get_linker_section (dynobj, ".dynbss");
13083 BFD_ASSERT (s != NULL);
13085 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
13086 copy the initial value out of the dynamic object and into the
13087 runtime process image. We need to remember the offset into the
13088 .rel(a).bss section we are going to use. */
13089 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
13093 srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
13094 elf32_arm_allocate_dynrelocs (info, srel, 1);
13098 return _bfd_elf_adjust_dynamic_copy (h, s);
13101 /* Allocate space in .plt, .got and associated reloc sections for dynamic relocs. */
13105 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
13107 struct bfd_link_info *info;
13108 struct elf32_arm_link_hash_table *htab;
13109 struct elf32_arm_link_hash_entry *eh;
13110 struct elf_dyn_relocs *p;
13112 if (h->root.type == bfd_link_hash_indirect)
13115 eh = (struct elf32_arm_link_hash_entry *) h;
13117 info = (struct bfd_link_info *) inf;
13118 htab = elf32_arm_hash_table (info);
13122 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
13123 && h->plt.refcount > 0)
13125 /* Make sure this symbol is output as a dynamic symbol.
13126 Undefined weak syms won't yet be marked as dynamic. */
13127 if (h->dynindx == -1
13128 && !h->forced_local)
13130 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13134 /* If the call in the PLT entry binds locally, the associated
13135 GOT entry should use an R_ARM_IRELATIVE relocation instead of
13136 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
13137 than the .plt section. */
13138 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
13141 if (eh->plt.noncall_refcount == 0
13142 && SYMBOL_REFERENCES_LOCAL (info, h))
13143 /* All non-call references can be resolved directly.
13144 This means that they can (and in some cases, must)
13145 resolve directly to the run-time target, rather than
13146 to the PLT. That in turns means that any .got entry
13147 would be equal to the .igot.plt entry, so there's
13148 no point having both. */
13149 h->got.refcount = 0;
13154 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
13156 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
13158 /* If this symbol is not defined in a regular file, and we are
13159 not generating a shared library, then set the symbol to this
13160 location in the .plt. This is required to make function
13161 pointers compare as equal between the normal executable and
13162 the shared library. */
13164 && !h->def_regular)
13166 h->root.u.def.section = htab->root.splt;
13167 h->root.u.def.value = h->plt.offset;
13169 /* Make sure the function is not marked as Thumb, in case
13170 it is the target of an ABS32 relocation, which will
13171 point to the PLT entry. */
13172 h->target_internal = ST_BRANCH_TO_ARM;
13175 htab->next_tls_desc_index++;
13177 /* VxWorks executables have a second set of relocations for
13178 each PLT entry. They go in a separate relocation section,
13179 which is processed by the kernel loader. */
13180 if (htab->vxworks_p && !info->shared)
13182 /* There is a relocation for the initial PLT entry:
13183 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
13184 if (h->plt.offset == htab->plt_header_size)
13185 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
13187 /* There are two extra relocations for each subsequent
13188 PLT entry: an R_ARM_32 relocation for the GOT entry,
13189 and an R_ARM_32 relocation for the PLT entry. */
13190 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
13195 h->plt.offset = (bfd_vma) -1;
13201 h->plt.offset = (bfd_vma) -1;
13205 eh = (struct elf32_arm_link_hash_entry *) h;
13206 eh->tlsdesc_got = (bfd_vma) -1;
13208 if (h->got.refcount > 0)
13212 int tls_type = elf32_arm_hash_entry (h)->tls_type;
13215 /* Make sure this symbol is output as a dynamic symbol.
13216 Undefined weak syms won't yet be marked as dynamic. */
13217 if (h->dynindx == -1
13218 && !h->forced_local)
13220 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13224 if (!htab->symbian_p)
13226 s = htab->root.sgot;
13227 h->got.offset = s->size;
13229 if (tls_type == GOT_UNKNOWN)
13232 if (tls_type == GOT_NORMAL)
13233 /* Non-TLS symbols need one GOT slot. */
13237 if (tls_type & GOT_TLS_GDESC)
13239 /* R_ARM_TLS_DESC needs 2 GOT slots. */
13241 = (htab->root.sgotplt->size
13242 - elf32_arm_compute_jump_table_size (htab));
13243 htab->root.sgotplt->size += 8;
13244 h->got.offset = (bfd_vma) -2;
13245 /* plt.got_offset needs to know there's a TLS_DESC
13246 reloc in the middle of .got.plt. */
13247 htab->num_tls_desc++;
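/* Illustrative note: the descriptor therefore occupies an 8-byte
   (two word) slot carved out of .got.plt rather than .got; the
   offset remembered in tlsdesc_got is relative to the end of the
   jump-table part of .got.plt, and h->got.offset is replaced with
   the sentinel (bfd_vma) -2 rather than a real .got offset.  */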
13250 if (tls_type & GOT_TLS_GD)
13252 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
13253 the symbol is both GD and GDESC, got.offset may
13254 have been overwritten. */
13255 h->got.offset = s->size;
13259 if (tls_type & GOT_TLS_IE)
13260 /* R_ARM_TLS_IE32 needs one GOT slot. */
13264 dyn = htab->root.dynamic_sections_created;
13267 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
13269 || !SYMBOL_REFERENCES_LOCAL (info, h)))
13272 if (tls_type != GOT_NORMAL
13273 && (info->shared || indx != 0)
13274 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
13275 || h->root.type != bfd_link_hash_undefweak))
13277 if (tls_type & GOT_TLS_IE)
13278 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13280 if (tls_type & GOT_TLS_GD)
13281 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13283 if (tls_type & GOT_TLS_GDESC)
13285 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13286 /* GDESC needs a trampoline to jump to. */
13287 htab->tls_trampoline = -1;
13290 /* Only GD needs it. GDESC just emits one relocation per slot. */
13292 if ((tls_type & GOT_TLS_GD) && indx != 0)
13293 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13295 else if (!SYMBOL_REFERENCES_LOCAL (info, h))
13297 if (htab->root.dynamic_sections_created)
13298 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
13299 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13301 else if (h->type == STT_GNU_IFUNC
13302 && eh->plt.noncall_refcount == 0)
13303 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
13304 they all resolve dynamically instead. Reserve room for the
13305 GOT entry's R_ARM_IRELATIVE relocation. */
13306 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
13307 else if (info->shared)
13308 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
13309 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13313 h->got.offset = (bfd_vma) -1;
13315 /* Allocate stubs for exported Thumb functions on v4t. */
13316 if (!htab->use_blx && h->dynindx != -1
13318 && h->target_internal == ST_BRANCH_TO_THUMB
13319 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
13321 struct elf_link_hash_entry * th;
13322 struct bfd_link_hash_entry * bh;
13323 struct elf_link_hash_entry * myh;
13327 /* Create a new symbol to register the real location of the function. */
13328 s = h->root.u.def.section;
13329 sprintf (name, "__real_%s", h->root.root.string);
13330 _bfd_generic_link_add_one_symbol (info, s->owner,
13331 name, BSF_GLOBAL, s,
13332 h->root.u.def.value,
13333 NULL, TRUE, FALSE, &bh);
13335 myh = (struct elf_link_hash_entry *) bh;
13336 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13337 myh->forced_local = 1;
13338 myh->target_internal = ST_BRANCH_TO_THUMB;
13339 eh->export_glue = myh;
13340 th = record_arm_to_thumb_glue (info, h);
13341 /* Point the symbol at the stub. */
13342 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
13343 h->target_internal = ST_BRANCH_TO_ARM;
13344 h->root.u.def.section = th->root.u.def.section;
13345 h->root.u.def.value = th->root.u.def.value & ~1;
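/* Worked example (illustrative): for an exported Thumb function
   "foo" built without BLX support, the code above creates a local
   alias "__real_foo" that still points at the Thumb implementation,
   records ARM->Thumb glue for it, and then redirects "foo" itself
   (with the Thumb bit cleared) at the glue, so ARM-state callers
   reach the function through the stub.  */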
13348 if (eh->dyn_relocs == NULL)
13351 /* In the shared -Bsymbolic case, discard space allocated for
13352 dynamic pc-relative relocs against symbols which turn out to be
13353 defined in regular objects. For the normal shared case, discard
13354 space for pc-relative relocs that have become local due to symbol
13355 visibility changes. */
13357 if (info->shared || htab->root.is_relocatable_executable)
13359 /* The only relocs that use pc_count are R_ARM_REL32 and
13360 R_ARM_REL32_NOI, which will appear on something like
13361 ".long foo - .". We want calls to protected symbols to resolve
13362 directly to the function rather than going via the plt. If people
13363 want function pointer comparisons to work as expected then they
13364 should avoid writing assembly like ".long foo - .". */
13365 if (SYMBOL_CALLS_LOCAL (info, h))
13367 struct elf_dyn_relocs **pp;
13369 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13371 p->count -= p->pc_count;
13380 if (htab->vxworks_p)
13382 struct elf_dyn_relocs **pp;
13384 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13386 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
13393 /* Also discard relocs on undefined weak syms with non-default visibility. */
13395 if (eh->dyn_relocs != NULL
13396 && h->root.type == bfd_link_hash_undefweak)
13398 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
13399 eh->dyn_relocs = NULL;
13401 /* Make sure undefined weak symbols are output as a dynamic symbol in PIEs. */
13403 else if (h->dynindx == -1
13404 && !h->forced_local)
13406 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13411 else if (htab->root.is_relocatable_executable && h->dynindx == -1
13412 && h->root.type == bfd_link_hash_new)
13414 /* Output absolute symbols so that we can create relocations
13415 against them. For normal symbols we output a relocation
13416 against the section that contains them. */
13417 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13424 /* For the non-shared case, discard space for relocs against
13425 symbols which turn out to need copy relocs or are not dynamic. */
13428 if (!h->non_got_ref
13429 && ((h->def_dynamic
13430 && !h->def_regular)
13431 || (htab->root.dynamic_sections_created
13432 && (h->root.type == bfd_link_hash_undefweak
13433 || h->root.type == bfd_link_hash_undefined))))
13435 /* Make sure this symbol is output as a dynamic symbol.
13436 Undefined weak syms won't yet be marked as dynamic. */
13437 if (h->dynindx == -1
13438 && !h->forced_local)
13440 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13444 /* If that succeeded, we know we'll be keeping all the relocs. */
13446 if (h->dynindx != -1)
13450 eh->dyn_relocs = NULL;
13455 /* Finally, allocate space. */
13456 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13458 asection *sreloc = elf_section_data (p->sec)->sreloc;
13459 if (h->type == STT_GNU_IFUNC
13460 && eh->plt.noncall_refcount == 0
13461 && SYMBOL_REFERENCES_LOCAL (info, h))
13462 elf32_arm_allocate_irelocs (info, sreloc, p->count);
13464 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
13470 /* Find any dynamic relocs that apply to read-only sections. */
13473 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
13475 struct elf32_arm_link_hash_entry * eh;
13476 struct elf_dyn_relocs * p;
13478 eh = (struct elf32_arm_link_hash_entry *) h;
13479 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13481 asection *s = p->sec;
13483 if (s != NULL && (s->flags & SEC_READONLY) != 0)
13485 struct bfd_link_info *info = (struct bfd_link_info *) inf;
13487 info->flags |= DF_TEXTREL;
13489 /* Not an error, just cut short the traversal. */
13497 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
13500 struct elf32_arm_link_hash_table *globals;
13502 globals = elf32_arm_hash_table (info);
13503 if (globals == NULL)
13506 globals->byteswap_code = byteswap_code;
13509 /* Set the sizes of the dynamic sections. */
13512 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
13513 struct bfd_link_info * info)
13518 bfd_boolean relocs;
13520 struct elf32_arm_link_hash_table *htab;
13522 htab = elf32_arm_hash_table (info);
13526 dynobj = elf_hash_table (info)->dynobj;
13527 BFD_ASSERT (dynobj != NULL);
13528 check_use_blx (htab);
13530 if (elf_hash_table (info)->dynamic_sections_created)
13532 /* Set the contents of the .interp section to the interpreter. */
13533 if (info->executable)
13535 s = bfd_get_linker_section (dynobj, ".interp");
13536 BFD_ASSERT (s != NULL);
13537 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
13538 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
13542 /* Set up .got offsets for local syms, and space for local dynamic relocs. */
13544 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
13546 bfd_signed_vma *local_got;
13547 bfd_signed_vma *end_local_got;
13548 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
13549 char *local_tls_type;
13550 bfd_vma *local_tlsdesc_gotent;
13551 bfd_size_type locsymcount;
13552 Elf_Internal_Shdr *symtab_hdr;
13554 bfd_boolean is_vxworks = htab->vxworks_p;
13555 unsigned int symndx;
13557 if (! is_arm_elf (ibfd))
13560 for (s = ibfd->sections; s != NULL; s = s->next)
13562 struct elf_dyn_relocs *p;
13564 for (p = (struct elf_dyn_relocs *)
13565 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
13567 if (!bfd_is_abs_section (p->sec)
13568 && bfd_is_abs_section (p->sec->output_section))
13570 /* Input section has been discarded, either because
13571 it is a copy of a linkonce section or due to
13572 linker script /DISCARD/, so we'll be discarding the relocs too. */
13575 else if (is_vxworks
13576 && strcmp (p->sec->output_section->name,
13579 /* Relocations in vxworks .tls_vars sections are
13580 handled specially by the loader. */
13582 else if (p->count != 0)
13584 srel = elf_section_data (p->sec)->sreloc;
13585 elf32_arm_allocate_dynrelocs (info, srel, p->count);
13586 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
13587 info->flags |= DF_TEXTREL;
13592 local_got = elf_local_got_refcounts (ibfd);
13596 symtab_hdr = & elf_symtab_hdr (ibfd);
13597 locsymcount = symtab_hdr->sh_info;
13598 end_local_got = local_got + locsymcount;
13599 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
13600 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
13601 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
13603 s = htab->root.sgot;
13604 srel = htab->root.srelgot;
13605 for (; local_got < end_local_got;
13606 ++local_got, ++local_iplt_ptr, ++local_tls_type,
13607 ++local_tlsdesc_gotent, ++symndx)
13609 *local_tlsdesc_gotent = (bfd_vma) -1;
13610 local_iplt = *local_iplt_ptr;
13611 if (local_iplt != NULL)
13613 struct elf_dyn_relocs *p;
13615 if (local_iplt->root.refcount > 0)
13617 elf32_arm_allocate_plt_entry (info, TRUE,
13620 if (local_iplt->arm.noncall_refcount == 0)
13621 /* All references to the PLT are calls, so all
13622 non-call references can resolve directly to the
13623 run-time target. This means that the .got entry
13624 would be the same as the .igot.plt entry, so there's
13625 no point creating both. */
13630 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
13631 local_iplt->root.offset = (bfd_vma) -1;
13634 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
13638 psrel = elf_section_data (p->sec)->sreloc;
13639 if (local_iplt->arm.noncall_refcount == 0)
13640 elf32_arm_allocate_irelocs (info, psrel, p->count);
13642 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
13645 if (*local_got > 0)
13647 Elf_Internal_Sym *isym;
13649 *local_got = s->size;
13650 if (*local_tls_type & GOT_TLS_GD)
13651 /* TLS_GD relocs need an 8-byte structure in the GOT. */
13653 if (*local_tls_type & GOT_TLS_GDESC)
13655 *local_tlsdesc_gotent = htab->root.sgotplt->size
13656 - elf32_arm_compute_jump_table_size (htab);
13657 htab->root.sgotplt->size += 8;
13658 *local_got = (bfd_vma) -2;
13659 /* plt.got_offset needs to know there's a TLS_DESC
13660 reloc in the middle of .got.plt. */
13661 htab->num_tls_desc++;
13663 if (*local_tls_type & GOT_TLS_IE)
13666 if (*local_tls_type & GOT_NORMAL)
13668 /* If the symbol is both GD and GDESC, *local_got
13669 may have been overwritten. */
13670 *local_got = s->size;
13674 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
13678 /* If all references to an STT_GNU_IFUNC PLT are calls,
13679 then all non-call references, including this GOT entry,
13680 resolve directly to the run-time target. */
13681 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
13682 && (local_iplt == NULL
13683 || local_iplt->arm.noncall_refcount == 0))
13684 elf32_arm_allocate_irelocs (info, srel, 1);
13685 else if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
13686 || *local_tls_type & GOT_TLS_GD)
13687 elf32_arm_allocate_dynrelocs (info, srel, 1);
13689 if (info->shared && *local_tls_type & GOT_TLS_GDESC)
13691 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13692 htab->tls_trampoline = -1;
13696 *local_got = (bfd_vma) -1;
13700 if (htab->tls_ldm_got.refcount > 0)
13702 /* Allocate two GOT entries and one dynamic relocation (if necessary)
13703 for R_ARM_TLS_LDM32 relocations. */
13704 htab->tls_ldm_got.offset = htab->root.sgot->size;
13705 htab->root.sgot->size += 8;
13707 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
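/* For reference: this pair of GOT words conventionally holds the
   local-dynamic module ID (filled in at run time through the dynamic
   relocation allocated above, when one is needed) followed by a zero
   offset word.  */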
13710 htab->tls_ldm_got.offset = -1;
13712 /* Allocate global sym .plt and .got entries, and space for global
13713 sym dynamic relocs. */
13714 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
13716 /* Here we rummage through the found bfds to collect glue information. */
13717 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
13719 if (! is_arm_elf (ibfd))
13722 /* Initialise mapping tables for code/data. */
13723 bfd_elf32_arm_init_maps (ibfd);
13725 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
13726 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
13727 /* xgettext:c-format */
13728 _bfd_error_handler (_("Errors encountered processing file %s"),
13732 /* Allocate space for the glue sections now that we've sized them. */
13733 bfd_elf32_arm_allocate_interworking_sections (info);
13735 /* For every jump slot reserved in the sgotplt, reloc_count is
13736 incremented. However, when we reserve space for TLS descriptors,
13737 it's not incremented, so in order to compute the space reserved
13738 for them, it suffices to multiply the reloc count by the jump slot size. */
13740 if (htab->root.srelplt)
13741 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
13743 if (htab->tls_trampoline)
13745 if (htab->root.splt->size == 0)
13746 htab->root.splt->size += htab->plt_header_size;
13748 htab->tls_trampoline = htab->root.splt->size;
13749 htab->root.splt->size += htab->plt_entry_size;
13751 /* If we're not using lazy TLS relocations, don't generate the
13752 PLT and GOT entries they require. */
13753 if (!(info->flags & DF_BIND_NOW))
13755 htab->dt_tlsdesc_got = htab->root.sgot->size;
13756 htab->root.sgot->size += 4;
13758 htab->dt_tlsdesc_plt = htab->root.splt->size;
13759 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
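/* So, when lazy binding is in effect, one extra GOT word
   (dt_tlsdesc_got) and one trampoline's worth of .plt space
   (dt_tlsdesc_plt) are reserved here; the matching DT_TLSDESC_GOT
   and DT_TLSDESC_PLT dynamic tags are added further down and given
   their final values in elf32_arm_finish_dynamic_sections.  */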
13763 /* The check_relocs and adjust_dynamic_symbol entry points have
13764 determined the sizes of the various dynamic sections. Allocate
13765 memory for them. */
13768 for (s = dynobj->sections; s != NULL; s = s->next)
13772 if ((s->flags & SEC_LINKER_CREATED) == 0)
13775 /* It's OK to base decisions on the section name, because none
13776 of the dynobj section names depend upon the input files. */
13777 name = bfd_get_section_name (dynobj, s);
13779 if (s == htab->root.splt)
13781 /* Remember whether there is a PLT. */
13782 plt = s->size != 0;
13784 else if (CONST_STRNEQ (name, ".rel"))
13788 /* Remember whether there are any reloc sections other
13789 than .rel(a).plt and .rela.plt.unloaded. */
13790 if (s != htab->root.srelplt && s != htab->srelplt2)
13793 /* We use the reloc_count field as a counter if we need
13794 to copy relocs into the output file. */
13795 s->reloc_count = 0;
13798 else if (s != htab->root.sgot
13799 && s != htab->root.sgotplt
13800 && s != htab->root.iplt
13801 && s != htab->root.igotplt
13802 && s != htab->sdynbss)
13804 /* It's not one of our sections, so don't allocate space. */
13810 /* If we don't need this section, strip it from the
13811 output file. This is mostly to handle .rel(a).bss and
13812 .rel(a).plt. We must create both sections in
13813 create_dynamic_sections, because they must be created
13814 before the linker maps input sections to output
13815 sections. The linker does that before
13816 adjust_dynamic_symbol is called, and it is that
13817 function which decides whether anything needs to go
13818 into these sections. */
13819 s->flags |= SEC_EXCLUDE;
13823 if ((s->flags & SEC_HAS_CONTENTS) == 0)
13826 /* Allocate memory for the section contents. */
13827 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
13828 if (s->contents == NULL)
13832 if (elf_hash_table (info)->dynamic_sections_created)
13834 /* Add some entries to the .dynamic section. We fill in the
13835 values later, in elf32_arm_finish_dynamic_sections, but we
13836 must add the entries now so that we get the correct size for
13837 the .dynamic section. The DT_DEBUG entry is filled in by the
13838 dynamic linker and used by the debugger. */
13839 #define add_dynamic_entry(TAG, VAL) \
13840 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
13842 if (info->executable)
13844 if (!add_dynamic_entry (DT_DEBUG, 0))
13850 if ( !add_dynamic_entry (DT_PLTGOT, 0)
13851 || !add_dynamic_entry (DT_PLTRELSZ, 0)
13852 || !add_dynamic_entry (DT_PLTREL,
13853 htab->use_rel ? DT_REL : DT_RELA)
13854 || !add_dynamic_entry (DT_JMPREL, 0))
13857 if (htab->dt_tlsdesc_plt &&
13858 (!add_dynamic_entry (DT_TLSDESC_PLT,0)
13859 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
13867 if (!add_dynamic_entry (DT_REL, 0)
13868 || !add_dynamic_entry (DT_RELSZ, 0)
13869 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
13874 if (!add_dynamic_entry (DT_RELA, 0)
13875 || !add_dynamic_entry (DT_RELASZ, 0)
13876 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
13881 /* If any dynamic relocs apply to a read-only section,
13882 then we need a DT_TEXTREL entry. */
13883 if ((info->flags & DF_TEXTREL) == 0)
13884 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
13887 if ((info->flags & DF_TEXTREL) != 0)
13889 if (!add_dynamic_entry (DT_TEXTREL, 0))
13892 if (htab->vxworks_p
13893 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
13896 #undef add_dynamic_entry
13901 /* Size sections even though they're not dynamic. We use this to set up
13902 _TLS_MODULE_BASE_, if needed. */
13905 elf32_arm_always_size_sections (bfd *output_bfd,
13906 struct bfd_link_info *info)
13910 if (info->relocatable)
13913 tls_sec = elf_hash_table (info)->tls_sec;
13917 struct elf_link_hash_entry *tlsbase;
13919 tlsbase = elf_link_hash_lookup
13920 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
13924 struct bfd_link_hash_entry *bh = NULL;
13925 const struct elf_backend_data *bed
13926 = get_elf_backend_data (output_bfd);
13928 if (!(_bfd_generic_link_add_one_symbol
13929 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
13930 tls_sec, 0, NULL, FALSE,
13931 bed->collect, &bh)))
13934 tlsbase->type = STT_TLS;
13935 tlsbase = (struct elf_link_hash_entry *)bh;
13936 tlsbase->def_regular = 1;
13937 tlsbase->other = STV_HIDDEN;
13938 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
13944 /* Finish up dynamic symbol handling. We set the contents of various
13945 dynamic sections here. */
13948 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
13949 struct bfd_link_info * info,
13950 struct elf_link_hash_entry * h,
13951 Elf_Internal_Sym * sym)
13953 struct elf32_arm_link_hash_table *htab;
13954 struct elf32_arm_link_hash_entry *eh;
13956 htab = elf32_arm_hash_table (info);
13960 eh = (struct elf32_arm_link_hash_entry *) h;
13962 if (h->plt.offset != (bfd_vma) -1)
13966 BFD_ASSERT (h->dynindx != -1);
13967 elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
13971 if (!h->def_regular)
13973 /* Mark the symbol as undefined, rather than as defined in
13974 the .plt section. Leave the value alone. */
13975 sym->st_shndx = SHN_UNDEF;
13976 /* If the symbol is weak, we do need to clear the value.
13977 Otherwise, the PLT entry would provide a definition for
13978 the symbol even if the symbol wasn't defined anywhere,
13979 and so the symbol would never be NULL. */
13980 if (!h->ref_regular_nonweak)
13983 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
13985 /* At least one non-call relocation references this .iplt entry,
13986 so the .iplt entry is the function's canonical address. */
13987 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
13988 sym->st_target_internal = ST_BRANCH_TO_ARM;
13989 sym->st_shndx = (_bfd_elf_section_from_bfd_section
13990 (output_bfd, htab->root.iplt->output_section));
13991 sym->st_value = (h->plt.offset
13992 + htab->root.iplt->output_section->vma
13993 + htab->root.iplt->output_offset);
14000 Elf_Internal_Rela rel;
14002 /* This symbol needs a copy reloc. Set it up. */
14003 BFD_ASSERT (h->dynindx != -1
14004 && (h->root.type == bfd_link_hash_defined
14005 || h->root.type == bfd_link_hash_defweak));
14008 BFD_ASSERT (s != NULL);
14011 rel.r_offset = (h->root.u.def.value
14012 + h->root.u.def.section->output_section->vma
14013 + h->root.u.def.section->output_offset);
14014 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
14015 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
14018 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
14019 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
14020 to the ".got" section. */
14021 if (h == htab->root.hdynamic
14022 || (!htab->vxworks_p && h == htab->root.hgot))
14023 sym->st_shndx = SHN_ABS;
14029 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
14031 const unsigned long *template, unsigned count)
14035 for (ix = 0; ix != count; ix++)
14037 unsigned long insn = template[ix];
14039 /* Emit mov pc,rx if bx is not permitted. */
14040 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
14041 insn = (insn & 0xf000000f) | 0x01a0f000;
14042 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
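/* Worked example of the substitution above: "bx r3" (0xe12fff13)
   matches the mask test and is rewritten to "mov pc, r3"
   (0xe1a0f003); the condition field and the register number are
   preserved by the (insn & 0xf000000f) term.  */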
14046 /* Finish up the dynamic sections. */
14049 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
14054 struct elf32_arm_link_hash_table *htab;
14056 htab = elf32_arm_hash_table (info);
14060 dynobj = elf_hash_table (info)->dynobj;
14062 sgot = htab->root.sgotplt;
14063 /* A broken linker script might have discarded the dynamic sections.
14064 Catch this here so that we do not seg-fault later on. */
14065 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
14067 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
14069 if (elf_hash_table (info)->dynamic_sections_created)
14072 Elf32_External_Dyn *dyncon, *dynconend;
14074 splt = htab->root.splt;
14075 BFD_ASSERT (splt != NULL && sdyn != NULL);
14076 BFD_ASSERT (htab->symbian_p || sgot != NULL);
14078 dyncon = (Elf32_External_Dyn *) sdyn->contents;
14079 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
14081 for (; dyncon < dynconend; dyncon++)
14083 Elf_Internal_Dyn dyn;
14087 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
14094 if (htab->vxworks_p
14095 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
14096 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14101 goto get_vma_if_bpabi;
14104 goto get_vma_if_bpabi;
14107 goto get_vma_if_bpabi;
14109 name = ".gnu.version";
14110 goto get_vma_if_bpabi;
14112 name = ".gnu.version_d";
14113 goto get_vma_if_bpabi;
14115 name = ".gnu.version_r";
14116 goto get_vma_if_bpabi;
14122 name = RELOC_SECTION (htab, ".plt");
14124 s = bfd_get_section_by_name (output_bfd, name);
14127 /* PR ld/14397: Issue an error message if a required section is missing. */
14128 (*_bfd_error_handler)
14129 (_("error: required section '%s' not found in the linker script"), name);
14130 bfd_set_error (bfd_error_invalid_operation);
14133 if (!htab->symbian_p)
14134 dyn.d_un.d_ptr = s->vma;
14136 /* In the BPABI, tags in the PT_DYNAMIC section point
14137 at the file offset, not the memory address, for the
14138 convenience of the post linker. */
14139 dyn.d_un.d_ptr = s->filepos;
14140 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14144 if (htab->symbian_p)
14149 s = htab->root.srelplt;
14150 BFD_ASSERT (s != NULL);
14151 dyn.d_un.d_val = s->size;
14152 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14157 if (!htab->symbian_p)
14159 /* My reading of the SVR4 ABI indicates that the
14160 procedure linkage table relocs (DT_JMPREL) should be
14161 included in the overall relocs (DT_REL). This is
14162 what Solaris does. However, UnixWare cannot handle
14163 that case. Therefore, we override the DT_RELSZ entry
14164 here to make it not include the JMPREL relocs. Since
14165 the linker script arranges for .rel(a).plt to follow all
14166 other relocation sections, we don't have to worry
14167 about changing the DT_REL entry. */
14168 s = htab->root.srelplt;
14170 dyn.d_un.d_val -= s->size;
14171 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
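/* Illustrative arithmetic (hypothetical sizes): if DT_RELSZ would
   otherwise be 0x60 bytes and .rel(a).plt is 0x18 bytes, the value
   written back here is 0x48, so that DT_RELSZ covers only the
   non-JMPREL relocations.  */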
14174 /* Fall through. */
14178 /* In the BPABI, the DT_REL tag must point at the file
14179 offset, not the VMA, of the first relocation
14180 section. So, we use code similar to that in
14181 elflink.c, but do not check for SHF_ALLOC on the
14182 relocation section, since relocation sections are
14183 never allocated under the BPABI. The comments above
14184 about UnixWare notwithstanding, we include all of the
14185 relocations here. */
14186 if (htab->symbian_p)
14189 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
14190 ? SHT_REL : SHT_RELA);
14191 dyn.d_un.d_val = 0;
14192 for (i = 1; i < elf_numsections (output_bfd); i++)
14194 Elf_Internal_Shdr *hdr
14195 = elf_elfsections (output_bfd)[i];
14196 if (hdr->sh_type == type)
14198 if (dyn.d_tag == DT_RELSZ
14199 || dyn.d_tag == DT_RELASZ)
14200 dyn.d_un.d_val += hdr->sh_size;
14201 else if ((ufile_ptr) hdr->sh_offset
14202 <= dyn.d_un.d_val - 1)
14203 dyn.d_un.d_val = hdr->sh_offset;
14206 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14210 case DT_TLSDESC_PLT:
14211 s = htab->root.splt;
14212 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14213 + htab->dt_tlsdesc_plt);
14214 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14217 case DT_TLSDESC_GOT:
14218 s = htab->root.sgot;
14219 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14220 + htab->dt_tlsdesc_got);
14221 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14224 /* Set the bottom bit of DT_INIT/FINI if the
14225 corresponding function is Thumb. */
14227 name = info->init_function;
14230 name = info->fini_function;
14232 /* If it wasn't set by elf_bfd_final_link
14233 then there is nothing to adjust. */
14234 if (dyn.d_un.d_val != 0)
14236 struct elf_link_hash_entry * eh;
14238 eh = elf_link_hash_lookup (elf_hash_table (info), name,
14239 FALSE, FALSE, TRUE);
14240 if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
14242 dyn.d_un.d_val |= 1;
14243 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14250 /* Fill in the first entry in the procedure linkage table. */
14251 if (splt->size > 0 && htab->plt_header_size)
14253 const bfd_vma *plt0_entry;
14254 bfd_vma got_address, plt_address, got_displacement;
14256 /* Calculate the addresses of the GOT and PLT. */
14257 got_address = sgot->output_section->vma + sgot->output_offset;
14258 plt_address = splt->output_section->vma + splt->output_offset;
14260 if (htab->vxworks_p)
14262 /* The VxWorks GOT is relocated by the dynamic linker.
14263 Therefore, we must emit relocations rather than simply
14264 computing the values now. */
14265 Elf_Internal_Rela rel;
14267 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
14268 put_arm_insn (htab, output_bfd, plt0_entry[0],
14269 splt->contents + 0);
14270 put_arm_insn (htab, output_bfd, plt0_entry[1],
14271 splt->contents + 4);
14272 put_arm_insn (htab, output_bfd, plt0_entry[2],
14273 splt->contents + 8);
14274 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
14276 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
14277 rel.r_offset = plt_address + 12;
14278 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14280 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
14281 htab->srelplt2->contents);
14283 else if (htab->nacl_p)
14287 got_displacement = got_address + 8 - (plt_address + 16);
14289 put_arm_insn (htab, output_bfd,
14290 elf32_arm_nacl_plt0_entry[0]
14291 | arm_movw_immediate (got_displacement),
14292 splt->contents + 0);
14293 put_arm_insn (htab, output_bfd,
14294 elf32_arm_nacl_plt0_entry[1]
14295 | arm_movt_immediate (got_displacement),
14296 splt->contents + 4);
14297 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
14298 put_arm_insn (htab, output_bfd,
14299 elf32_arm_nacl_plt0_entry[i],
14300 splt->contents + (i * 4));
14304 got_displacement = got_address - (plt_address + 16);
14306 plt0_entry = elf32_arm_plt0_entry;
14307 put_arm_insn (htab, output_bfd, plt0_entry[0],
14308 splt->contents + 0);
14309 put_arm_insn (htab, output_bfd, plt0_entry[1],
14310 splt->contents + 4);
14311 put_arm_insn (htab, output_bfd, plt0_entry[2],
14312 splt->contents + 8);
14313 put_arm_insn (htab, output_bfd, plt0_entry[3],
14314 splt->contents + 12);
14316 #ifdef FOUR_WORD_PLT
14317 /* The displacement value goes in the otherwise-unused
14318 last word of the second entry. */
14319 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
14321 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
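/* Illustrative arithmetic (hypothetical addresses): with .plt at
   0x10300 and the GOT at 0x11000, the displacement word stored
   above is 0x11000 - (0x10300 + 16) = 0xcf0; at run time the PLT
   header code in effect adds that pc-relative displacement back in
   to locate the GOT.  */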
14326 /* UnixWare sets the entsize of .plt to 4, although that doesn't
14327 really seem like the right value. */
14328 if (splt->output_section->owner == output_bfd)
14329 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
14331 if (htab->dt_tlsdesc_plt)
14333 bfd_vma got_address
14334 = sgot->output_section->vma + sgot->output_offset;
14335 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
14336 + htab->root.sgot->output_offset);
14337 bfd_vma plt_address
14338 = splt->output_section->vma + splt->output_offset;
14340 arm_put_trampoline (htab, output_bfd,
14341 splt->contents + htab->dt_tlsdesc_plt,
14342 dl_tlsdesc_lazy_trampoline, 6);
14344 bfd_put_32 (output_bfd,
14345 gotplt_address + htab->dt_tlsdesc_got
14346 - (plt_address + htab->dt_tlsdesc_plt)
14347 - dl_tlsdesc_lazy_trampoline[6],
14348 splt->contents + htab->dt_tlsdesc_plt + 24);
14349 bfd_put_32 (output_bfd,
14350 got_address - (plt_address + htab->dt_tlsdesc_plt)
14351 - dl_tlsdesc_lazy_trampoline[7],
14352 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
14355 if (htab->tls_trampoline)
14357 arm_put_trampoline (htab, output_bfd,
14358 splt->contents + htab->tls_trampoline,
14359 tls_trampoline, 3);
14360 #ifdef FOUR_WORD_PLT
14361 bfd_put_32 (output_bfd, 0x00000000,
14362 splt->contents + htab->tls_trampoline + 12);
14366 if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0)
14368 /* Correct the .rel(a).plt.unloaded relocations. They will have
14369 incorrect symbol indexes. */
14373 num_plts = ((htab->root.splt->size - htab->plt_header_size)
14374 / htab->plt_entry_size);
14375 p = htab->srelplt2->contents + RELOC_SIZE (htab);
14377 for (; num_plts; num_plts--)
14379 Elf_Internal_Rela rel;
14381 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14382 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14383 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14384 p += RELOC_SIZE (htab);
14386 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14387 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
14388 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14389 p += RELOC_SIZE (htab);
14394 /* Fill in the first three entries in the global offset table. */
14397 if (sgot->size > 0)
14400 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
14402 bfd_put_32 (output_bfd,
14403 sdyn->output_section->vma + sdyn->output_offset,
14405 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
14406 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
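/* GOT[0] is given the address of .dynamic (or 0 when there is no
   dynamic section); GOT[1] and GOT[2] are cleared here and are, by
   convention, filled in at run time by the dynamic linker with its
   link-map pointer and lazy-resolution entry point.  */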
14409 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
14416 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
14418 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
14419 struct elf32_arm_link_hash_table *globals;
14421 i_ehdrp = elf_elfheader (abfd);
14423 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
14424 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
14426 i_ehdrp->e_ident[EI_OSABI] = 0;
14427 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
14431 globals = elf32_arm_hash_table (link_info);
14432 if (globals != NULL && globals->byteswap_code)
14433 i_ehdrp->e_flags |= EF_ARM_BE8;
14436 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
14437 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
14439 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
14441 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
14443 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
14447 static enum elf_reloc_type_class
14448 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
14450 switch ((int) ELF32_R_TYPE (rela->r_info))
14452 case R_ARM_RELATIVE:
14453 return reloc_class_relative;
14454 case R_ARM_JUMP_SLOT:
14455 return reloc_class_plt;
14457 return reloc_class_copy;
14459 return reloc_class_normal;
14464 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
14466 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
14469 /* Return TRUE if this is an unwinding table entry. */
14472 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
14474 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
14475 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
14479 /* Set the type and flags for an ARM section. We do this by
14480 the section name, which is a hack, but ought to work. */
14483 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
14487 name = bfd_get_section_name (abfd, sec);
14489 if (is_arm_elf_unwind_section_name (abfd, name))
14491 hdr->sh_type = SHT_ARM_EXIDX;
14492 hdr->sh_flags |= SHF_LINK_ORDER;
14497 /* Handle an ARM specific section when reading an object file. This is
14498 called when bfd_section_from_shdr finds a section with an unknown type. */
14502 elf32_arm_section_from_shdr (bfd *abfd,
14503 Elf_Internal_Shdr * hdr,
14507 /* There ought to be a place to keep ELF backend specific flags, but
14508 at the moment there isn't one. We just keep track of the
14509 sections by their name, instead. Fortunately, the ABI gives
14510 names for all the ARM specific sections, so we will probably get away with this. */
14512 switch (hdr->sh_type)
14514 case SHT_ARM_EXIDX:
14515 case SHT_ARM_PREEMPTMAP:
14516 case SHT_ARM_ATTRIBUTES:
14523 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
14529 static _arm_elf_section_data *
14530 get_arm_elf_section_data (asection * sec)
14532 if (sec && sec->owner && is_arm_elf (sec->owner))
14533 return elf32_arm_section_data (sec);
14541 struct bfd_link_info *info;
14544 int (*func) (void *, const char *, Elf_Internal_Sym *,
14545 asection *, struct elf_link_hash_entry *);
14546 } output_arch_syminfo;
14548 enum map_symbol_type
14556 /* Output a single mapping symbol. */
14559 elf32_arm_output_map_sym (output_arch_syminfo *osi,
14560 enum map_symbol_type type,
14563 static const char *names[3] = {"$a", "$t", "$d"};
14564 Elf_Internal_Sym sym;
14566 sym.st_value = osi->sec->output_section->vma
14567 + osi->sec->output_offset
14571 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
14572 sym.st_shndx = osi->sec_shndx;
14573 sym.st_target_internal = 0;
14574 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
14575 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
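/* The names above follow the AAELF mapping symbol convention: "$a"
   marks the start of a run of ARM instructions, "$t" a run of Thumb
   instructions and "$d" a run of literal data.  For example, a PLT
   entry followed by its literal word gets a "$a" at its start and a
   "$d" at the literal.  */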
14578 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
14579 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
14582 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
14583 bfd_boolean is_iplt_entry_p,
14584 union gotplt_union *root_plt,
14585 struct arm_plt_info *arm_plt)
14587 struct elf32_arm_link_hash_table *htab;
14588 bfd_vma addr, plt_header_size;
14590 if (root_plt->offset == (bfd_vma) -1)
14593 htab = elf32_arm_hash_table (osi->info);
14597 if (is_iplt_entry_p)
14599 osi->sec = htab->root.iplt;
14600 plt_header_size = 0;
14604 osi->sec = htab->root.splt;
14605 plt_header_size = htab->plt_header_size;
14607 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
14608 (osi->info->output_bfd, osi->sec->output_section));
14610 addr = root_plt->offset & -2;
14611 if (htab->symbian_p)
14613 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14615 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
14618 else if (htab->vxworks_p)
14620 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14622 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
14624 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
14626 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
14629 else if (htab->nacl_p)
14631 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14636 bfd_boolean thumb_stub_p;
14638 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
14641 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
14644 #ifdef FOUR_WORD_PLT
14645 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14647 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
14650 /* A three-word PLT with no Thumb thunk contains only ARM code,
14651 so we only need to output a mapping symbol for the first PLT entry
14652 and for entries with Thumb thunks. */
14653 if (thumb_stub_p || addr == plt_header_size)
14655 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14664 /* Output mapping symbols for PLT entries associated with H. */
14667 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
14669 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
14670 struct elf32_arm_link_hash_entry *eh;
14672 if (h->root.type == bfd_link_hash_indirect)
14675 if (h->root.type == bfd_link_hash_warning)
14676 /* When warning symbols are created, they **replace** the "real"
14677 entry in the hash table, thus we never get to see the real
14678 symbol in a hash traversal. So look at it now. */
14679 h = (struct elf_link_hash_entry *) h->root.u.i.link;
14681 eh = (struct elf32_arm_link_hash_entry *) h;
14682 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
14683 &h->plt, &eh->plt);
14686 /* Output a single local symbol for a generated stub. */
14689 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
14690 bfd_vma offset, bfd_vma size)
14692 Elf_Internal_Sym sym;
14694 sym.st_value = osi->sec->output_section->vma
14695 + osi->sec->output_offset
14697 sym.st_size = size;
14699 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14700 sym.st_shndx = osi->sec_shndx;
14701 sym.st_target_internal = 0;
14702 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
14706 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
14709 struct elf32_arm_stub_hash_entry *stub_entry;
14710 asection *stub_sec;
14713 output_arch_syminfo *osi;
14714 const insn_sequence *template_sequence;
14715 enum stub_insn_type prev_type;
14718 enum map_symbol_type sym_type;
14720 /* Massage our args to the form they really have. */
14721 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
14722 osi = (output_arch_syminfo *) in_arg;
14724 stub_sec = stub_entry->stub_sec;
14726 /* Ensure this stub is attached to the current section being processed. */
14728 if (stub_sec != osi->sec)
14731 addr = (bfd_vma) stub_entry->stub_offset;
14732 stub_name = stub_entry->output_name;
14734 template_sequence = stub_entry->stub_template;
14735 switch (template_sequence[0].type)
14738 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
14743 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
14744 stub_entry->stub_size))
14752 prev_type = DATA_TYPE;
14754 for (i = 0; i < stub_entry->stub_template_size; i++)
14756 switch (template_sequence[i].type)
14759 sym_type = ARM_MAP_ARM;
14764 sym_type = ARM_MAP_THUMB;
14768 sym_type = ARM_MAP_DATA;
14776 if (template_sequence[i].type != prev_type)
14778 prev_type = template_sequence[i].type;
14779 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
14783 switch (template_sequence[i].type)
14807 /* Output mapping symbols for linker generated sections,
14808 and for those data-only sections that do not have a mapping symbol. */
14812 elf32_arm_output_arch_local_syms (bfd *output_bfd,
14813 struct bfd_link_info *info,
14815 int (*func) (void *, const char *,
14816 Elf_Internal_Sym *,
14818 struct elf_link_hash_entry *))
14820 output_arch_syminfo osi;
14821 struct elf32_arm_link_hash_table *htab;
14823 bfd_size_type size;
14826 htab = elf32_arm_hash_table (info);
14830 check_use_blx (htab);
14832 osi.flaginfo = flaginfo;
14836 /* Add a $d mapping symbol to data-only sections that
14837 don't have any mapping symbol. This may result in (harmless) redundant
14838 mapping symbols. */
14839 for (input_bfd = info->input_bfds;
14841 input_bfd = input_bfd->link_next)
14843 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
14844 for (osi.sec = input_bfd->sections;
14846 osi.sec = osi.sec->next)
14848 if (osi.sec->output_section != NULL
14849 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
14851 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
14852 == SEC_HAS_CONTENTS
14853 && get_arm_elf_section_data (osi.sec) != NULL
14854 && get_arm_elf_section_data (osi.sec)->mapcount == 0
14855 && osi.sec->size > 0
14856 && (osi.sec->flags & SEC_EXCLUDE) == 0)
14858 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14859 (output_bfd, osi.sec->output_section);
14860 if (osi.sec_shndx != (int)SHN_BAD)
14861 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
14866 /* ARM->Thumb glue. */
14867 if (htab->arm_glue_size > 0)
14869 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
14870 ARM2THUMB_GLUE_SECTION_NAME);
14872 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14873 (output_bfd, osi.sec->output_section);
14874 if (info->shared || htab->root.is_relocatable_executable
14875 || htab->pic_veneer)
14876 size = ARM2THUMB_PIC_GLUE_SIZE;
14877 else if (htab->use_blx)
14878 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
14880 size = ARM2THUMB_STATIC_GLUE_SIZE;
14882 for (offset = 0; offset < htab->arm_glue_size; offset += size)
14884 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
14885 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
14889 /* Thumb->ARM glue. */
14890 if (htab->thumb_glue_size > 0)
14892 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
14893 THUMB2ARM_GLUE_SECTION_NAME);
14895 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14896 (output_bfd, osi.sec->output_section);
14897 size = THUMB2ARM_GLUE_SIZE;
14899 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
14901 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
14902 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
14906 /* ARMv4 BX veneers. */
14907 if (htab->bx_glue_size > 0)
14909 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
14910 ARM_BX_GLUE_SECTION_NAME);
14912 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14913 (output_bfd, osi.sec->output_section);
14915 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
14918 /* Long call stubs. */
14919 if (htab->stub_bfd && htab->stub_bfd->sections)
14921 asection* stub_sec;
14923 for (stub_sec = htab->stub_bfd->sections;
14925 stub_sec = stub_sec->next)
14927 /* Ignore non-stub sections. */
14928 if (!strstr (stub_sec->name, STUB_SUFFIX))
14931 osi.sec = stub_sec;
14933 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14934 (output_bfd, osi.sec->output_section);
14936 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
14940 /* Finally, output mapping symbols for the PLT. */
14941 if (htab->root.splt && htab->root.splt->size > 0)
14943 osi.sec = htab->root.splt;
14944 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
14945 (output_bfd, osi.sec->output_section));
14947 /* Output mapping symbols for the plt header. SymbianOS does not have a plt header. */
14949 if (htab->vxworks_p)
14951 /* VxWorks shared libraries have no PLT header. */
14954 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14956 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
14960 else if (htab->nacl_p)
14962 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14965 else if (!htab->symbian_p)
14967 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14969 #ifndef FOUR_WORD_PLT
14970 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
14975 if ((htab->root.splt && htab->root.splt->size > 0)
14976 || (htab->root.iplt && htab->root.iplt->size > 0))
14978 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
14979 for (input_bfd = info->input_bfds;
14981 input_bfd = input_bfd->link_next)
14983 struct arm_local_iplt_info **local_iplt;
14984 unsigned int i, num_syms;
14986 local_iplt = elf32_arm_local_iplt (input_bfd);
14987 if (local_iplt != NULL)
14989 num_syms = elf_symtab_hdr (input_bfd).sh_info;
14990 for (i = 0; i < num_syms; i++)
14991 if (local_iplt[i] != NULL
14992 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
14993 &local_iplt[i]->root,
14994 &local_iplt[i]->arm))
14999 if (htab->dt_tlsdesc_plt != 0)
15001 /* Mapping symbols for the lazy tls trampoline. */
15002 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
15005 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
15006 htab->dt_tlsdesc_plt + 24))
15009 if (htab->tls_trampoline != 0)
15011 /* Mapping symbols for the tls trampoline. */
15012 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
15014 #ifdef FOUR_WORD_PLT
15015 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
15016 htab->tls_trampoline + 12))
15024 /* Allocate target specific section data. */
15027 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
15029 if (!sec->used_by_bfd)
15031 _arm_elf_section_data *sdata;
15032 bfd_size_type amt = sizeof (*sdata);
15034 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
15037 sec->used_by_bfd = sdata;
15040 return _bfd_elf_new_section_hook (abfd, sec);
15044 /* Used to order a list of mapping symbols by address. */
15047 elf32_arm_compare_mapping (const void * a, const void * b)
15049 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
15050 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
15052 if (amap->vma > bmap->vma)
15054 else if (amap->vma < bmap->vma)
15056 else if (amap->type > bmap->type)
15057 /* Ensure results do not depend on the host qsort for objects with
15058 multiple mapping symbols at the same address by sorting on type after vma. */
15061 else if (amap->type < bmap->type)
15067 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
15069 static unsigned long
15070 offset_prel31 (unsigned long addr, bfd_vma offset)
15072 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
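/* Worked example: offset_prel31 (0x7ffffffc, 8) yields 0x00000004;
   the addition wraps within the low 31 bits while bit 31 of ADDR
   (here zero) is preserved unchanged.  */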
15075 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31 offsets. */
15079 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
15081 unsigned long first_word = bfd_get_32 (output_bfd, from);
15082 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
15084 /* High bit of first word is supposed to be zero. */
15085 if ((first_word & 0x80000000ul) == 0)
15086 first_word = offset_prel31 (first_word, offset);
15088 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
15089 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
15090 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
15091 second_word = offset_prel31 (second_word, offset);
15093 bfd_put_32 (output_bfd, first_word, to);
15094 bfd_put_32 (output_bfd, second_word, to + 4);
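/* For reference: each .ARM.exidx entry is a pair of words.  The
   first word is a prel31 offset to the function it describes; the
   second is either the literal 0x1 (EXIDX_CANTUNWIND), an inline
   unwind descriptor with the high bit set, or, when the high bit is
   clear and the value is not 0x1, a prel31 offset into .ARM.extab,
   which is why only that last form is adjusted above.  */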
15097 /* Data for make_branch_to_a8_stub(). */
15099 struct a8_branch_to_stub_data
15101 asection *writing_section;
15102 bfd_byte *contents;
15106 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
15107 places for a particular section. */
15110 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
15113 struct elf32_arm_stub_hash_entry *stub_entry;
15114 struct a8_branch_to_stub_data *data;
15115 bfd_byte *contents;
15116 unsigned long branch_insn;
15117 bfd_vma veneered_insn_loc, veneer_entry_loc;
15118 bfd_signed_vma branch_offset;
15120 unsigned int target;
15122 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
15123 data = (struct a8_branch_to_stub_data *) in_arg;
15125 if (stub_entry->target_section != data->writing_section
15126 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
15129 contents = data->contents;
15131 veneered_insn_loc = stub_entry->target_section->output_section->vma
15132 + stub_entry->target_section->output_offset
15133 + stub_entry->target_value;
15135 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
15136 + stub_entry->stub_sec->output_offset
15137 + stub_entry->stub_offset;
15139 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
15140 veneered_insn_loc &= ~3u;
15142 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
15144 abfd = stub_entry->target_section->owner;
15145 target = stub_entry->target_value;
15147 /* We attempt to avoid this condition by setting stubs_always_after_branch
15148 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
15149 This check is just to be on the safe side... */
15150 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
15152 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
15153 "allocated in unsafe location"), abfd);
15157 switch (stub_entry->stub_type)
15159 case arm_stub_a8_veneer_b:
15160 case arm_stub_a8_veneer_b_cond:
15161 branch_insn = 0xf0009000;
15164 case arm_stub_a8_veneer_blx:
15165 branch_insn = 0xf000e800;
15168 case arm_stub_a8_veneer_bl:
15170 unsigned int i1, j1, i2, j2, s;
15172 branch_insn = 0xf000d000;
15175 if (branch_offset < -16777216 || branch_offset > 16777214)
15177 /* There's not much we can do apart from complain if this happens. */
15179 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
15180 "of range (input file too large)"), abfd);
15184 /* i1 = not(j1 eor s), so:
15185 not i1 = j1 eor s
15186 j1 = (not i1) eor s. */
15188 branch_insn |= (branch_offset >> 1) & 0x7ff;
15189 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
15190 i2 = (branch_offset >> 22) & 1;
15191 i1 = (branch_offset >> 23) & 1;
15192 s = (branch_offset >> 24) & 1;
15195 branch_insn |= j2 << 11;
15196 branch_insn |= j1 << 13;
15197 branch_insn |= s << 26;
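/* Summary of the field packing above (Thumb-2 BL/BLX, both
   halfwords viewed as one 32-bit value): imm11 = offset[11:1] goes
   in bits [10:0], imm10 = offset[21:12] in bits [25:16], J2 in bit
   11, J1 in bit 13 and the sign bit S in bit 26, with J1/J2 derived
   from I1/I2 and S as per the identity in the comment above.  */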
15206 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
15207 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
15212 /* Do code byteswapping. Return FALSE afterwards so that the section is
15213 written out as normal. */
15216 elf32_arm_write_section (bfd *output_bfd,
15217 struct bfd_link_info *link_info,
15219 bfd_byte *contents)
15221 unsigned int mapcount, errcount;
15222 _arm_elf_section_data *arm_data;
15223 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
15224 elf32_arm_section_map *map;
15225 elf32_vfp11_erratum_list *errnode;
15228 bfd_vma offset = sec->output_section->vma + sec->output_offset;
15232 if (globals == NULL)
15235 /* If this section has not been allocated an _arm_elf_section_data
15236 structure then we cannot record anything. */
15237 arm_data = get_arm_elf_section_data (sec);
15238 if (arm_data == NULL)
15241 mapcount = arm_data->mapcount;
15242 map = arm_data->map;
15243 errcount = arm_data->erratumcount;
15247 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
15249 for (errnode = arm_data->erratumlist; errnode != 0;
15250 errnode = errnode->next)
15252 bfd_vma target = errnode->vma - offset;
15254 switch (errnode->type)
15256 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
15258 bfd_vma branch_to_veneer;
15259 /* Original condition code of instruction, plus bit mask for
15260 ARM B instruction. */
15261 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
15264 /* The instruction is before the label. */
15267 /* Above offset included in -4 below. */
15268 branch_to_veneer = errnode->u.b.veneer->vma
15269 - errnode->vma - 4;
15271 if ((signed) branch_to_veneer < -(1 << 25)
15272 || (signed) branch_to_veneer >= (1 << 25))
15273 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15274 "range"), output_bfd);
15276 insn |= (branch_to_veneer >> 2) & 0xffffff;
15277 contents[endianflip ^ target] = insn & 0xff;
15278 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15279 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15280 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15284 case VFP11_ERRATUM_ARM_VENEER:
15286 bfd_vma branch_from_veneer;
15289 /* Take size of veneer into account. */
15290 branch_from_veneer = errnode->u.v.branch->vma
15291 - errnode->vma - 12;
15293 if ((signed) branch_from_veneer < -(1 << 25)
15294 || (signed) branch_from_veneer >= (1 << 25))
15295 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15296 "range"), output_bfd);
15298 /* Original instruction. */
15299 insn = errnode->u.v.branch->u.b.vfp_insn;
15300 contents[endianflip ^ target] = insn & 0xff;
15301 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15302 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15303 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15305 /* Branch back to insn after original insn. */
15306 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
15307 contents[endianflip ^ (target + 4)] = insn & 0xff;
15308 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
15309 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
15310 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
15320 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
15322 arm_unwind_table_edit *edit_node
15323 = arm_data->u.exidx.unwind_edit_list;
15324 /* Now, sec->size is the size of the section we will write. The original
15325 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
15326 markers) was sec->rawsize. (This isn't the case if we perform no
15327 edits, then rawsize will be zero and we should use size). */
15328 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
15329 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
15330 unsigned int in_index, out_index;
15331 bfd_vma add_to_offsets = 0;
15333 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
15337 unsigned int edit_index = edit_node->index;
15339 if (in_index < edit_index && in_index * 8 < input_size)
15341 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15342 contents + in_index * 8, add_to_offsets);
15346 else if (in_index == edit_index
15347 || (in_index * 8 >= input_size
15348 && edit_index == UINT_MAX))
15350 switch (edit_node->type)
15352 case DELETE_EXIDX_ENTRY:
15354 add_to_offsets += 8;
15357 case INSERT_EXIDX_CANTUNWIND_AT_END:
15359 asection *text_sec = edit_node->linked_section;
15360 bfd_vma text_offset = text_sec->output_section->vma
15361 + text_sec->output_offset
15363 bfd_vma exidx_offset = offset + out_index * 8;
15364 unsigned long prel31_offset;
15366 /* Note: this is meant to be equivalent to an
15367 R_ARM_PREL31 relocation. These synthetic
15368 EXIDX_CANTUNWIND markers are not relocated by the
15369 usual BFD method. */
15370 prel31_offset = (text_offset - exidx_offset)
15373 /* First address we can't unwind. */
15374 bfd_put_32 (output_bfd, prel31_offset,
15375 &edited_contents[out_index * 8]);
15377 /* Code for EXIDX_CANTUNWIND. */
15378 bfd_put_32 (output_bfd, 0x1,
15379 &edited_contents[out_index * 8 + 4]);
15382 add_to_offsets -= 8;
15387 edit_node = edit_node->next;
15392 /* No more edits, copy remaining entries verbatim. */
15393 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15394 contents + in_index * 8, add_to_offsets);
15400 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
15401 bfd_set_section_contents (output_bfd, sec->output_section,
15403 (file_ptr) sec->output_offset, sec->size);
15408 /* Fix code to point to Cortex-A8 erratum stubs. */
15409 if (globals->fix_cortex_a8)
15411 struct a8_branch_to_stub_data data;
15413 data.writing_section = sec;
15414 data.contents = contents;
15416 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
15423 if (globals->byteswap_code)
15425 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
15428 for (i = 0; i < mapcount; i++)
15430 if (i == mapcount - 1)
15433 end = map[i + 1].vma;
15435 switch (map[i].type)
15438 /* Byte swap code words. */
15439 while (ptr + 3 < end)
15441 tmp = contents[ptr];
15442 contents[ptr] = contents[ptr + 3];
15443 contents[ptr + 3] = tmp;
15444 tmp = contents[ptr + 1];
15445 contents[ptr + 1] = contents[ptr + 2];
15446 contents[ptr + 2] = tmp;
15452 /* Byte swap code halfwords. */
15453 while (ptr + 1 < end)
15455 tmp = contents[ptr];
15456 contents[ptr] = contents[ptr + 1];
15457 contents[ptr + 1] = tmp;
15463 /* Leave data alone. */
15471 arm_data->mapcount = -1;
15472 arm_data->mapsize = 0;
15473 arm_data->map = NULL;
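/* Illustrative sketch (not built): the loop above walks the sorted mapping
   symbols and byte-swaps ARM regions as 32-bit words and Thumb regions as
   16-bit halfwords, leaving data regions untouched; this is how code is
   byte-swapped when producing a BE8 image.  A minimal word swap, assuming a
   4-byte-aligned region:  */
#if 0
static void
example_swap_arm_words (bfd_byte *buf, bfd_size_type len)
{
  bfd_size_type i;

  for (i = 0; i + 4 <= len; i += 4)
    {
      bfd_byte t0 = buf[i], t1 = buf[i + 1];

      buf[i] = buf[i + 3];
      buf[i + 1] = buf[i + 2];
      buf[i + 2] = t1;
      buf[i + 3] = t0;
    }
}
#endif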
15478 /* Mangle thumb function symbols as we read them in. */
15481 elf32_arm_swap_symbol_in (bfd * abfd,
15484 Elf_Internal_Sym *dst)
15486 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
15489 /* New EABI objects mark thumb function symbols by setting the low bit of the address. */
15491 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
15492 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
15494 if (dst->st_value & 1)
15496 dst->st_value &= ~(bfd_vma) 1;
15497 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15500 dst->st_target_internal = ST_BRANCH_TO_ARM;
15502 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
15504 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
15505 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15507 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
15508 dst->st_target_internal = ST_BRANCH_LONG;
15510 dst->st_target_internal = ST_BRANCH_UNKNOWN;
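/* Illustrative sketch (not built): after the hook above, a Thumb STT_FUNC
   symbol read with st_value 0x8001 (value made up for illustration) is seen
   by the rest of BFD as value 0x8000 with ST_BRANCH_TO_THUMB recorded in
   st_target_internal.  A hypothetical predicate using that convention:  */
#if 0
static bfd_boolean
example_is_thumb_func (const Elf_Internal_Sym *sym)
{
  return (ELF_ST_TYPE (sym->st_info) == STT_FUNC
	  && sym->st_target_internal == ST_BRANCH_TO_THUMB);
}
#endif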
15516 /* Mangle thumb function symbols as we write them out. */
15519 elf32_arm_swap_symbol_out (bfd *abfd,
15520 const Elf_Internal_Sym *src,
15524 Elf_Internal_Sym newsym;
15526 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
15527 of the address set, as per the new EABI. We do this unconditionally
15528 because objcopy does not set the elf header flags until after
15529 it writes out the symbol table. */
15530 if (src->st_target_internal == ST_BRANCH_TO_THUMB)
15533 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
15534 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
15535 if (newsym.st_shndx != SHN_UNDEF)
15537 /* Do this only for defined symbols. At link time, the static
15538 linker simulates the work of the dynamic linker in resolving
15539 symbols and carries over the thumbness of the symbols it finds to
15540 the output symbol table. It's not clear how it happens, but
15541 the thumbness of undefined symbols can well be different at
15542 runtime, and writing '1' for them will be confusing for users
15543 and possibly for the dynamic linker itself.
15545 newsym.st_value |= 1;
15550 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
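/* Illustrative note: the transformation above is the inverse of
   elf32_arm_swap_symbol_in, so a defined Thumb function held internally at
   (say) 0x8000 is written back out as an STT_FUNC with st_value 0x8001,
   while undefined Thumb references keep an even value for the reasons given
   in the comment above.  Addresses are made up for illustration.  */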
15553 /* Add the PT_ARM_EXIDX program header. */
15556 elf32_arm_modify_segment_map (bfd *abfd,
15557 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15559 struct elf_segment_map *m;
15562 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15563 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15565 /* If there is already a PT_ARM_EXIDX header, then we do not
15566 want to add another one. This situation arises when running
15567 "strip"; the input binary already has the header. */
15568 m = elf_tdata (abfd)->segment_map;
15569 while (m && m->p_type != PT_ARM_EXIDX)
15573 m = (struct elf_segment_map *)
15574 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
15577 m->p_type = PT_ARM_EXIDX;
15579 m->sections[0] = sec;
15581 m->next = elf_tdata (abfd)->segment_map;
15582 elf_tdata (abfd)->segment_map = m;
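/* Illustrative note: the resulting program header covers just .ARM.exidx,
   so a dump of the headers shows an EXIDX entry alongside the LOAD
   segments, and a run-time unwinder locates the exception index table
   through it (PT_ARM_EXIDX is 0x70000001, in the processor-specific
   range).  */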
15589 /* We may add a PT_ARM_EXIDX program header. */
15592 elf32_arm_additional_program_headers (bfd *abfd,
15593 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15597 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15598 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15604 /* Hook called by the linker routine which adds symbols from an object file. */
15608 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
15609 Elf_Internal_Sym *sym, const char **namep,
15610 flagword *flagsp, asection **secp, bfd_vma *valp)
15612 if ((abfd->flags & DYNAMIC) == 0
15613 && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
15614 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
15615 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
15617 if (elf32_arm_hash_table (info)->vxworks_p
15618 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
15619 flagsp, secp, valp))
15625 /* We use this to override swap_symbol_in and swap_symbol_out. */
15626 const struct elf_size_info elf32_arm_size_info =
15628 sizeof (Elf32_External_Ehdr),
15629 sizeof (Elf32_External_Phdr),
15630 sizeof (Elf32_External_Shdr),
15631 sizeof (Elf32_External_Rel),
15632 sizeof (Elf32_External_Rela),
15633 sizeof (Elf32_External_Sym),
15634 sizeof (Elf32_External_Dyn),
15635 sizeof (Elf_External_Note),
15639 ELFCLASS32, EV_CURRENT,
15640 bfd_elf32_write_out_phdrs,
15641 bfd_elf32_write_shdrs_and_ehdr,
15642 bfd_elf32_checksum_contents,
15643 bfd_elf32_write_relocs,
15644 elf32_arm_swap_symbol_in,
15645 elf32_arm_swap_symbol_out,
15646 bfd_elf32_slurp_reloc_table,
15647 bfd_elf32_slurp_symbol_table,
15648 bfd_elf32_swap_dyn_in,
15649 bfd_elf32_swap_dyn_out,
15650 bfd_elf32_swap_reloc_in,
15651 bfd_elf32_swap_reloc_out,
15652 bfd_elf32_swap_reloca_in,
15653 bfd_elf32_swap_reloca_out
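/* Illustrative sketch: generic ELF code reaches the two overrides above
   through the backend's size_info pointer, so every symbol read or written
   for this target passes through the ARM-specific swap routines, roughly

     const struct elf_backend_data *bed = get_elf_backend_data (abfd);
     bed->s->swap_symbol_in (abfd, esym, shndx, &isym);

   (argument names here are placeholders; see bfd_elf32_swap_symbol_in for
   the real signature).  */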
15656 #define ELF_ARCH bfd_arch_arm
15657 #define ELF_TARGET_ID ARM_ELF_DATA
15658 #define ELF_MACHINE_CODE EM_ARM
15659 #ifdef __QNXTARGET__
15660 #define ELF_MAXPAGESIZE 0x1000
15662 #define ELF_MAXPAGESIZE 0x8000
15664 #define ELF_MINPAGESIZE 0x1000
15665 #define ELF_COMMONPAGESIZE 0x1000
15667 #define bfd_elf32_mkobject elf32_arm_mkobject
15669 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
15670 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
15671 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
15672 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
15673 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
15674 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
15675 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
15676 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
15677 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
15678 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
15679 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
15680 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
15681 #define bfd_elf32_bfd_final_link elf32_arm_final_link
15683 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
15684 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
15685 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
15686 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
15687 #define elf_backend_check_relocs elf32_arm_check_relocs
15688 #define elf_backend_relocate_section elf32_arm_relocate_section
15689 #define elf_backend_write_section elf32_arm_write_section
15690 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
15691 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
15692 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
15693 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
15694 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
15695 #define elf_backend_always_size_sections elf32_arm_always_size_sections
15696 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
15697 #define elf_backend_post_process_headers elf32_arm_post_process_headers
15698 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
15699 #define elf_backend_object_p elf32_arm_object_p
15700 #define elf_backend_fake_sections elf32_arm_fake_sections
15701 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
15702 #define elf_backend_final_write_processing elf32_arm_final_write_processing
15703 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
15704 #define elf_backend_size_info elf32_arm_size_info
15705 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15706 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
15707 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
15708 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
15709 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
15711 #define elf_backend_can_refcount 1
15712 #define elf_backend_can_gc_sections 1
15713 #define elf_backend_plt_readonly 1
15714 #define elf_backend_want_got_plt 1
15715 #define elf_backend_want_plt_sym 0
15716 #define elf_backend_may_use_rel_p 1
15717 #define elf_backend_may_use_rela_p 0
15718 #define elf_backend_default_use_rela_p 0
15720 #define elf_backend_got_header_size 12
15722 #undef elf_backend_obj_attrs_vendor
15723 #define elf_backend_obj_attrs_vendor "aeabi"
15724 #undef elf_backend_obj_attrs_section
15725 #define elf_backend_obj_attrs_section ".ARM.attributes"
15726 #undef elf_backend_obj_attrs_arg_type
15727 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
15728 #undef elf_backend_obj_attrs_section_type
15729 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
15730 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
15731 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
15733 #include "elf32-target.h"
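/* Illustrative sketch (not built): each platform-specific target that
   follows uses the same recipe -- override the vector names and whichever
   backend hooks differ, then re-include the generator header to emit
   another target vector.  The "example" names below are hypothetical.  */
#if 0
#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		bfd_elf32_littlearm_example_vec
#undef  elf_backend_modify_segment_map
#define elf_backend_modify_segment_map	elf32_arm_example_modify_segment_map
#include "elf32-target.h"
#endif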
15735 /* Native Client targets. */
15737 #undef TARGET_LITTLE_SYM
15738 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_nacl_vec
15739 #undef TARGET_LITTLE_NAME
15740 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
15741 #undef TARGET_BIG_SYM
15742 #define TARGET_BIG_SYM bfd_elf32_bigarm_nacl_vec
15743 #undef TARGET_BIG_NAME
15744 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
15746 /* Like elf32_arm_link_hash_table_create -- but overrides
15747 appropriately for NaCl. */
15749 static struct bfd_link_hash_table *
15750 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
15752 struct bfd_link_hash_table *ret;
15754 ret = elf32_arm_link_hash_table_create (abfd);
15757 struct elf32_arm_link_hash_table *htab
15758 = (struct elf32_arm_link_hash_table *) ret;
15762 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
15763 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
15768 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
15769 really need to use elf32_arm_modify_segment_map. But we do it
15770 anyway just to reduce gratuitous differences with the stock ARM backend. */
15773 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
15775 return (elf32_arm_modify_segment_map (abfd, info)
15776 && nacl_modify_segment_map (abfd, info));
15780 #define elf32_bed elf32_arm_nacl_bed
15781 #undef bfd_elf32_bfd_link_hash_table_create
15782 #define bfd_elf32_bfd_link_hash_table_create \
15783 elf32_arm_nacl_link_hash_table_create
15784 #undef elf_backend_plt_alignment
15785 #define elf_backend_plt_alignment 4
15786 #undef elf_backend_modify_segment_map
15787 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
15788 #undef elf_backend_modify_program_headers
15789 #define elf_backend_modify_program_headers nacl_modify_program_headers
15791 #undef ELF_MAXPAGESIZE
15792 #define ELF_MAXPAGESIZE 0x10000
15794 #include "elf32-target.h"
15796 /* Reset to defaults. */
15797 #undef elf_backend_plt_alignment
15798 #undef elf_backend_modify_segment_map
15799 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15800 #undef elf_backend_modify_program_headers
15802 /* VxWorks Targets. */
15804 #undef TARGET_LITTLE_SYM
15805 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
15806 #undef TARGET_LITTLE_NAME
15807 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
15808 #undef TARGET_BIG_SYM
15809 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
15810 #undef TARGET_BIG_NAME
15811 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
15813 /* Like elf32_arm_link_hash_table_create -- but overrides
15814 appropriately for VxWorks. */
15816 static struct bfd_link_hash_table *
15817 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
15819 struct bfd_link_hash_table *ret;
15821 ret = elf32_arm_link_hash_table_create (abfd);
15824 struct elf32_arm_link_hash_table *htab
15825 = (struct elf32_arm_link_hash_table *) ret;
15827 htab->vxworks_p = 1;
15833 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
15835 elf32_arm_final_write_processing (abfd, linker);
15836 elf_vxworks_final_write_processing (abfd, linker);
15840 #define elf32_bed elf32_arm_vxworks_bed
15842 #undef bfd_elf32_bfd_link_hash_table_create
15843 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
15844 #undef elf_backend_final_write_processing
15845 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
15846 #undef elf_backend_emit_relocs
15847 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
15849 #undef elf_backend_may_use_rel_p
15850 #define elf_backend_may_use_rel_p 0
15851 #undef elf_backend_may_use_rela_p
15852 #define elf_backend_may_use_rela_p 1
15853 #undef elf_backend_default_use_rela_p
15854 #define elf_backend_default_use_rela_p 1
15855 #undef elf_backend_want_plt_sym
15856 #define elf_backend_want_plt_sym 1
15857 #undef ELF_MAXPAGESIZE
15858 #define ELF_MAXPAGESIZE 0x1000
15860 #include "elf32-target.h"
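/* Illustrative sketch (not built): switching to RELA in the VxWorks block
   above changes the on-disk relocation record; REL keeps the addend in the
   field being relocated, while RELA stores it explicitly alongside r_offset
   and r_info.  Type names follow the System V ABI; BFD's own external
   structs hold these fields as byte arrays.  */
#if 0
typedef struct { Elf32_Addr r_offset; Elf32_Word r_info; } example_Elf32_Rel;
typedef struct { Elf32_Addr r_offset; Elf32_Word r_info;
		 Elf32_Sword r_addend; } example_Elf32_Rela;
#endif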
15863 /* Merge backend specific data from an object file to the output
15864 object file when linking. */
15867 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
15869 flagword out_flags;
15871 bfd_boolean flags_compatible = TRUE;
15874 /* Check if we have the same endianness. */
15875 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
15878 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
15881 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
15884 /* The input BFD must have had its flags initialised. */
15885 /* The following seems bogus to me -- The flags are initialized in
15886 the assembler but I don't think an elf_flags_init field is
15887 written into the object. */
15888 /* BFD_ASSERT (elf_flags_init (ibfd)); */
15890 in_flags = elf_elfheader (ibfd)->e_flags;
15891 out_flags = elf_elfheader (obfd)->e_flags;
15893 /* In theory there is no reason why we couldn't handle this. However,
15894 in practice it isn't even close to working and there is no real
15895 reason to want it. */
15896 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
15897 && !(ibfd->flags & DYNAMIC)
15898 && (in_flags & EF_ARM_BE8))
15900 _bfd_error_handler (_("error: %B is already in final BE8 format"),
15905 if (!elf_flags_init (obfd))
15907 /* If the input is the default architecture and had the default
15908 flags then do not bother setting the flags for the output
15909 architecture; instead allow future merges to do this. If no
15910 future merges ever set these flags then they will retain their
15911 uninitialised values, which, surprise surprise, correspond
15912 to the default values. */
15913 if (bfd_get_arch_info (ibfd)->the_default
15914 && elf_elfheader (ibfd)->e_flags == 0)
15917 elf_flags_init (obfd) = TRUE;
15918 elf_elfheader (obfd)->e_flags = in_flags;
15920 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
15921 && bfd_get_arch_info (obfd)->the_default)
15922 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
15927 /* Determine what should happen if the input ARM architecture
15928 does not match the output ARM architecture. */
15929 if (! bfd_arm_merge_machines (ibfd, obfd))
15932 /* Identical flags must be compatible. */
15933 if (in_flags == out_flags)
15936 /* Check to see if the input BFD actually contains any sections. If
15937 not, its flags may not have been initialised either, but it
15938 cannot actually cause any incompatibility. Do not short-circuit
15939 dynamic objects; their section list may be emptied by
15940 elf_link_add_object_symbols.
15942 Also check to see if there are no code sections in the input.
15943 In this case there is no need to check for code-specific flags.
15944 XXX - do we need to worry about floating-point format compatibility
15945 in data sections? */
15946 if (!(ibfd->flags & DYNAMIC))
15948 bfd_boolean null_input_bfd = TRUE;
15949 bfd_boolean only_data_sections = TRUE;
15951 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
15953 /* Ignore synthetic glue sections. */
15954 if (strcmp (sec->name, ".glue_7")
15955 && strcmp (sec->name, ".glue_7t"))
15957 if ((bfd_get_section_flags (ibfd, sec)
15958 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
15959 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
15960 only_data_sections = FALSE;
15962 null_input_bfd = FALSE;
15967 if (null_input_bfd || only_data_sections)
15971 /* Complain about various flag mismatches. */
15972 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
15973 EF_ARM_EABI_VERSION (out_flags)))
15976 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
15978 (in_flags & EF_ARM_EABIMASK) >> 24,
15979 (out_flags & EF_ARM_EABIMASK) >> 24);
15983 /* Not sure what needs to be checked for EABI versions >= 1. */
15984 /* VxWorks libraries do not use these flags. */
15985 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
15986 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
15987 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
15989 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
15992 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
15994 in_flags & EF_ARM_APCS_26 ? 26 : 32,
15995 out_flags & EF_ARM_APCS_26 ? 26 : 32);
15996 flags_compatible = FALSE;
15999 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
16001 if (in_flags & EF_ARM_APCS_FLOAT)
16003 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
16007 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
16010 flags_compatible = FALSE;
16013 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
16015 if (in_flags & EF_ARM_VFP_FLOAT)
16017 (_("error: %B uses VFP instructions, whereas %B does not"),
16021 (_("error: %B uses FPA instructions, whereas %B does not"),
16024 flags_compatible = FALSE;
16027 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
16029 if (in_flags & EF_ARM_MAVERICK_FLOAT)
16031 (_("error: %B uses Maverick instructions, whereas %B does not"),
16035 (_("error: %B does not use Maverick instructions, whereas %B does"),
16038 flags_compatible = FALSE;
16041 #ifdef EF_ARM_SOFT_FLOAT
16042 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
16044 /* We can allow interworking between code that uses the VFP format
16045 layout and either soft float or integer regs for
16046 passing floating point arguments and results. We already
16047 know that the APCS_FLOAT flags match; similarly for the VFP flags. */
16049 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
16050 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
16052 if (in_flags & EF_ARM_SOFT_FLOAT)
16054 (_("error: %B uses software FP, whereas %B uses hardware FP"),
16058 (_("error: %B uses hardware FP, whereas %B uses software FP"),
16061 flags_compatible = FALSE;
16066 /* Interworking mismatch is only a warning. */
16067 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
16069 if (in_flags & EF_ARM_INTERWORK)
16072 (_("Warning: %B supports interworking, whereas %B does not"),
16078 (_("Warning: %B does not support interworking, whereas %B does"),
16084 return flags_compatible;
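/* Illustrative example: the EABI version compared above lives in the top
   byte of e_flags, so the shift in the diagnostic recovers the
   human-readable number, e.g. e_flags 0x05000000 gives
   (e_flags & EF_ARM_EABIMASK) >> 24 == 5, i.e. EABI version 5; the lower
   bits carry the remaining EF_ARM_* feature flags checked in this
   function.  */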
16088 /* Symbian OS Targets. */
16090 #undef TARGET_LITTLE_SYM
16091 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
16092 #undef TARGET_LITTLE_NAME
16093 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
16094 #undef TARGET_BIG_SYM
16095 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
16096 #undef TARGET_BIG_NAME
16097 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
16099 /* Like elf32_arm_link_hash_table_create -- but overrides
16100 appropriately for Symbian OS. */
16102 static struct bfd_link_hash_table *
16103 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
16105 struct bfd_link_hash_table *ret;
16107 ret = elf32_arm_link_hash_table_create (abfd);
16110 struct elf32_arm_link_hash_table *htab
16111 = (struct elf32_arm_link_hash_table *)ret;
16112 /* There is no PLT header for Symbian OS. */
16113 htab->plt_header_size = 0;
16114 /* The PLT entries are each one instruction and one word. */
16115 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
16116 htab->symbian_p = 1;
16117 /* Symbian uses armv5t or above, so use_blx is always true. */
16119 htab->root.is_relocatable_executable = 1;
16124 static const struct bfd_elf_special_section
16125 elf32_arm_symbian_special_sections[] =
16127 /* In a BPABI executable, the dynamic linking sections do not go in
16128 the loadable read-only segment. The post-linker may wish to
16129 refer to these sections, but they are not part of the final program image. */
16131 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
16132 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
16133 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
16134 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
16135 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
16136 /* These sections do not need to be writable as the SymbianOS
16137 postlinker will arrange things so that no dynamic relocation is
16139 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
16140 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
16141 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
16142 { NULL, 0, 0, 0, 0 }
16146 elf32_arm_symbian_begin_write_processing (bfd *abfd,
16147 struct bfd_link_info *link_info)
16149 /* BPABI objects are never loaded directly by an OS kernel; they are
16150 processed by a postlinker first, into an OS-specific format. If
16151 the D_PAGED bit is set on the file, BFD will align segments on
16152 page boundaries, so that an OS can directly map the file. With
16153 BPABI objects, that just results in wasted space. In addition,
16154 because we clear the D_PAGED bit, map_sections_to_segments will
16155 recognize that the program headers should not be mapped into any
16156 loadable segment. */
16157 abfd->flags &= ~D_PAGED;
16158 elf32_arm_begin_write_processing (abfd, link_info);
16162 elf32_arm_symbian_modify_segment_map (bfd *abfd,
16163 struct bfd_link_info *info)
16165 struct elf_segment_map *m;
16168 /* BPABI shared libraries and executables should have a PT_DYNAMIC
16169 segment. However, because the .dynamic section is not marked
16170 with SEC_LOAD, the generic ELF code will not create such a segment. */
16172 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
16175 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
16176 if (m->p_type == PT_DYNAMIC)
16181 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
16182 m->next = elf_tdata (abfd)->segment_map;
16183 elf_tdata (abfd)->segment_map = m;
16187 /* Also call the generic arm routine. */
16188 return elf32_arm_modify_segment_map (abfd, info);
16191 /* Return address for Ith PLT stub in section PLT, for relocation REL
16192 or (bfd_vma) -1 if it should not be included. */
16195 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
16196 const arelent *rel ATTRIBUTE_UNUSED)
16198 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
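/* Illustrative example: with the Symbian PLT entry described above (one
   instruction plus one data word, i.e. 8 bytes per stub, assuming
   ARRAY_SIZE (elf32_arm_symbian_plt_entry) == 2), the synthetic symbol for
   stub I lands at PLT->vma + 8 * I; e.g. stub 3 of a PLT at 0x8000 is
   reported at 0x8018.  */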
16203 #define elf32_bed elf32_arm_symbian_bed
16205 /* The dynamic sections are not allocated on SymbianOS; the postlinker
16206 will process them and then discard them. */
16207 #undef ELF_DYNAMIC_SEC_FLAGS
16208 #define ELF_DYNAMIC_SEC_FLAGS \
16209 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
16211 #undef elf_backend_emit_relocs
16213 #undef bfd_elf32_bfd_link_hash_table_create
16214 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
16215 #undef elf_backend_special_sections
16216 #define elf_backend_special_sections elf32_arm_symbian_special_sections
16217 #undef elf_backend_begin_write_processing
16218 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
16219 #undef elf_backend_final_write_processing
16220 #define elf_backend_final_write_processing elf32_arm_final_write_processing
16222 #undef elf_backend_modify_segment_map
16223 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
16225 /* There is no .got section for BPABI objects, and hence no header. */
16226 #undef elf_backend_got_header_size
16227 #define elf_backend_got_header_size 0
16229 /* Similarly, there is no .got.plt section. */
16230 #undef elf_backend_want_got_plt
16231 #define elf_backend_want_got_plt 0
16233 #undef elf_backend_plt_sym_val
16234 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
16236 #undef elf_backend_may_use_rel_p
16237 #define elf_backend_may_use_rel_p 1
16238 #undef elf_backend_may_use_rela_p
16239 #define elf_backend_may_use_rela_p 0
16240 #undef elf_backend_default_use_rela_p
16241 #define elf_backend_default_use_rela_p 0
16242 #undef elf_backend_want_plt_sym
16243 #define elf_backend_want_plt_sym 0
16244 #undef ELF_MAXPAGESIZE
16245 #define ELF_MAXPAGESIZE 0x8000
16247 #include "elf32-target.h"