1 /* Renesas RX specific support for 32-bit ELF.
2 Copyright (C) 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
#include "sysdep.h"
#include "bfd.h"
23 #include "bfd_stdint.h"
#include "elf-bfd.h"
#include "elf/rx.h"
27 #include "libiberty.h"
29 #define RX_OPCODE_BIG_ENDIAN 0
32 char * rx_get_reloc (long);
33 void rx_dump_symtab (bfd *, void *, void *);
36 #define RXREL(n,sz,bit,shift,complain,pcrel) \
37 HOWTO (R_RX_##n, shift, sz, bit, pcrel, 0, complain_overflow_ ## complain, \
38 bfd_elf_generic_reloc, "R_RX_" #n, FALSE, 0, ~0, FALSE)
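/* For illustration, the entry RXREL (DIR32, 2, 32, 0, signed, FALSE)
   below expands to
     HOWTO (R_RX_DIR32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
            bfd_elf_generic_reloc, "R_RX_DIR32", FALSE, 0, ~0, FALSE)
   i.e. a non-PC-relative 32-bit field (size code 2), no right shift,
   checked for signed overflow.  */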
40 /* Note that the relocations around 0x7f are internal to this file;
41 feel free to move them as needed to avoid conflicts with published
42 relocation numbers. */
44 static reloc_howto_type rx_elf_howto_table [] =
46 RXREL (NONE, 0, 0, 0, dont, FALSE),
47 RXREL (DIR32, 2, 32, 0, signed, FALSE),
48 RXREL (DIR24S, 2, 24, 0, signed, FALSE),
49 RXREL (DIR16, 1, 16, 0, dont, FALSE),
50 RXREL (DIR16U, 1, 16, 0, unsigned, FALSE),
51 RXREL (DIR16S, 1, 16, 0, signed, FALSE),
52 RXREL (DIR8, 0, 8, 0, dont, FALSE),
53 RXREL (DIR8U, 0, 8, 0, unsigned, FALSE),
54 RXREL (DIR8S, 0, 8, 0, signed, FALSE),
55 RXREL (DIR24S_PCREL, 2, 24, 0, signed, TRUE),
56 RXREL (DIR16S_PCREL, 1, 16, 0, signed, TRUE),
57 RXREL (DIR8S_PCREL, 0, 8, 0, signed, TRUE),
58 RXREL (DIR16UL, 1, 16, 2, unsigned, FALSE),
59 RXREL (DIR16UW, 1, 16, 1, unsigned, FALSE),
60 RXREL (DIR8UL, 0, 8, 2, unsigned, FALSE),
61 RXREL (DIR8UW, 0, 8, 1, unsigned, FALSE),
62 RXREL (DIR32_REV, 2, 32, 0, dont, FALSE),
63 RXREL (DIR16_REV, 1, 16, 0, dont, FALSE),
64 RXREL (DIR3U_PCREL, 0, 3, 0, dont, TRUE),
80 RXREL (RH_3_PCREL, 0, 3, 0, signed, TRUE),
81 RXREL (RH_16_OP, 1, 16, 0, signed, FALSE),
82 RXREL (RH_24_OP, 2, 24, 0, signed, FALSE),
83 RXREL (RH_32_OP, 2, 32, 0, signed, FALSE),
84 RXREL (RH_24_UNS, 2, 24, 0, unsigned, FALSE),
85 RXREL (RH_8_NEG, 0, 8, 0, signed, FALSE),
86 RXREL (RH_16_NEG, 1, 16, 0, signed, FALSE),
87 RXREL (RH_24_NEG, 2, 24, 0, signed, FALSE),
88 RXREL (RH_32_NEG, 2, 32, 0, signed, FALSE),
89 RXREL (RH_DIFF, 2, 32, 0, signed, FALSE),
90 RXREL (RH_GPRELB, 1, 16, 0, unsigned, FALSE),
91 RXREL (RH_GPRELW, 1, 16, 0, unsigned, FALSE),
92 RXREL (RH_GPRELL, 1, 16, 0, unsigned, FALSE),
93 RXREL (RH_RELAX, 0, 0, 0, dont, FALSE),
115 RXREL (ABS32, 2, 32, 0, dont, FALSE),
116 RXREL (ABS24S, 2, 24, 0, signed, FALSE),
117 RXREL (ABS16, 1, 16, 0, dont, FALSE),
118 RXREL (ABS16U, 1, 16, 0, unsigned, FALSE),
119 RXREL (ABS16S, 1, 16, 0, signed, FALSE),
120 RXREL (ABS8, 0, 8, 0, dont, FALSE),
121 RXREL (ABS8U, 0, 8, 0, unsigned, FALSE),
122 RXREL (ABS8S, 0, 8, 0, signed, FALSE),
123 RXREL (ABS24S_PCREL, 2, 24, 0, signed, TRUE),
124 RXREL (ABS16S_PCREL, 1, 16, 0, signed, TRUE),
125 RXREL (ABS8S_PCREL, 0, 8, 0, signed, TRUE),
126 RXREL (ABS16UL, 1, 16, 0, unsigned, FALSE),
127 RXREL (ABS16UW, 1, 16, 0, unsigned, FALSE),
128 RXREL (ABS8UL, 0, 8, 0, unsigned, FALSE),
129 RXREL (ABS8UW, 0, 8, 0, unsigned, FALSE),
130 RXREL (ABS32_REV, 2, 32, 0, dont, FALSE),
131 RXREL (ABS16_REV, 1, 16, 0, dont, FALSE),
133 #define STACK_REL_P(x) ((x) <= R_RX_ABS16_REV && (x) >= R_RX_ABS32)
174 /* These are internal. */
175 /* A 5-bit unsigned displacement to a B/W/L address, at bit position 8/12. */
176 /* ---- ---- 4--- 3210. */
177 #define R_RX_RH_ABS5p8B 0x78
178 RXREL (RH_ABS5p8B, 0, 0, 0, dont, FALSE),
179 #define R_RX_RH_ABS5p8W 0x79
180 RXREL (RH_ABS5p8W, 0, 0, 0, dont, FALSE),
181 #define R_RX_RH_ABS5p8L 0x7a
182 RXREL (RH_ABS5p8L, 0, 0, 0, dont, FALSE),
183 /* A 5-bit unsigned displacement to a B/W/L address, at bit position 5/12. */
184 /* ---- -432 1--- 0---. */
185 #define R_RX_RH_ABS5p5B 0x7b
186 RXREL (RH_ABS5p5B, 0, 0, 0, dont, FALSE),
187 #define R_RX_RH_ABS5p5W 0x7c
188 RXREL (RH_ABS5p5W, 0, 0, 0, dont, FALSE),
189 #define R_RX_RH_ABS5p5L 0x7d
190 RXREL (RH_ABS5p5L, 0, 0, 0, dont, FALSE),
191 /* A 4-bit unsigned immediate at bit position 8. */
192 #define R_RX_RH_UIMM4p8 0x7e
193 RXREL (RH_UIMM4p8, 0, 0, 0, dont, FALSE),
194 /* A 4-bit negative unsigned immediate at bit position 8. */
195 #define R_RX_RH_UNEG4p8 0x7f
196 RXREL (RH_UNEG4p8, 0, 0, 0, dont, FALSE),
197 /* End of internal relocs. */
199 RXREL (SYM, 2, 32, 0, dont, FALSE),
200 RXREL (OPneg, 2, 32, 0, dont, FALSE),
201 RXREL (OPadd, 2, 32, 0, dont, FALSE),
202 RXREL (OPsub, 2, 32, 0, dont, FALSE),
203 RXREL (OPmul, 2, 32, 0, dont, FALSE),
204 RXREL (OPdiv, 2, 32, 0, dont, FALSE),
205 RXREL (OPshla, 2, 32, 0, dont, FALSE),
206 RXREL (OPshra, 2, 32, 0, dont, FALSE),
207 RXREL (OPsctsize, 2, 32, 0, dont, FALSE),
208 RXREL (OPscttop, 2, 32, 0, dont, FALSE),
209 RXREL (OPand, 2, 32, 0, dont, FALSE),
210 RXREL (OPor, 2, 32, 0, dont, FALSE),
211 RXREL (OPxor, 2, 32, 0, dont, FALSE),
212 RXREL (OPnot, 2, 32, 0, dont, FALSE),
213 RXREL (OPmod, 2, 32, 0, dont, FALSE),
214 RXREL (OPromtop, 2, 32, 0, dont, FALSE),
215 RXREL (OPramtop, 2, 32, 0, dont, FALSE)
218 /* Map BFD reloc types to RX ELF reloc types. */
222 bfd_reloc_code_real_type bfd_reloc_val;
223 unsigned int rx_reloc_val;
226 static const struct rx_reloc_map rx_reloc_map [] =
228 { BFD_RELOC_NONE, R_RX_NONE },
229 { BFD_RELOC_8, R_RX_DIR8S },
230 { BFD_RELOC_16, R_RX_DIR16S },
231 { BFD_RELOC_24, R_RX_DIR24S },
232 { BFD_RELOC_32, R_RX_DIR32 },
233 { BFD_RELOC_RX_16_OP, R_RX_DIR16 },
234 { BFD_RELOC_RX_DIR3U_PCREL, R_RX_DIR3U_PCREL },
235 { BFD_RELOC_8_PCREL, R_RX_DIR8S_PCREL },
236 { BFD_RELOC_16_PCREL, R_RX_DIR16S_PCREL },
237 { BFD_RELOC_24_PCREL, R_RX_DIR24S_PCREL },
238 { BFD_RELOC_RX_8U, R_RX_DIR8U },
239 { BFD_RELOC_RX_16U, R_RX_DIR16U },
240 { BFD_RELOC_RX_24U, R_RX_RH_24_UNS },
241 { BFD_RELOC_RX_NEG8, R_RX_RH_8_NEG },
242 { BFD_RELOC_RX_NEG16, R_RX_RH_16_NEG },
243 { BFD_RELOC_RX_NEG24, R_RX_RH_24_NEG },
244 { BFD_RELOC_RX_NEG32, R_RX_RH_32_NEG },
245 { BFD_RELOC_RX_DIFF, R_RX_RH_DIFF },
246 { BFD_RELOC_RX_GPRELB, R_RX_RH_GPRELB },
247 { BFD_RELOC_RX_GPRELW, R_RX_RH_GPRELW },
248 { BFD_RELOC_RX_GPRELL, R_RX_RH_GPRELL },
249 { BFD_RELOC_RX_RELAX, R_RX_RH_RELAX },
250 { BFD_RELOC_RX_SYM, R_RX_SYM },
251 { BFD_RELOC_RX_OP_SUBTRACT, R_RX_OPsub },
252 { BFD_RELOC_RX_OP_NEG, R_RX_OPneg },
253 { BFD_RELOC_RX_ABS8, R_RX_ABS8 },
254 { BFD_RELOC_RX_ABS16, R_RX_ABS16 },
255 { BFD_RELOC_RX_ABS16_REV, R_RX_ABS16_REV },
256 { BFD_RELOC_RX_ABS32, R_RX_ABS32 },
257 { BFD_RELOC_RX_ABS32_REV, R_RX_ABS32_REV },
258 { BFD_RELOC_RX_ABS16UL, R_RX_ABS16UL },
259 { BFD_RELOC_RX_ABS16UW, R_RX_ABS16UW },
260 { BFD_RELOC_RX_ABS16U, R_RX_ABS16U }
263 #define BIGE(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)
265 static reloc_howto_type *
266 rx_reloc_type_lookup (bfd * abfd ATTRIBUTE_UNUSED,
267 bfd_reloc_code_real_type code)
271 if (code == BFD_RELOC_RX_32_OP)
272 return rx_elf_howto_table + R_RX_DIR32;
274 for (i = ARRAY_SIZE (rx_reloc_map); i--;)
275 if (rx_reloc_map [i].bfd_reloc_val == code)
276 return rx_elf_howto_table + rx_reloc_map[i].rx_reloc_val;
281 static reloc_howto_type *
282 rx_reloc_name_lookup (bfd * abfd ATTRIBUTE_UNUSED, const char * r_name)
286 for (i = 0; i < ARRAY_SIZE (rx_elf_howto_table); i++)
287 if (rx_elf_howto_table[i].name != NULL
288 && strcasecmp (rx_elf_howto_table[i].name, r_name) == 0)
289 return rx_elf_howto_table + i;
294 /* Set the howto pointer for an RX ELF reloc. */
297 rx_info_to_howto_rela (bfd * abfd ATTRIBUTE_UNUSED,
299 Elf_Internal_Rela * dst)
303 r_type = ELF32_R_TYPE (dst->r_info);
304 BFD_ASSERT (r_type < (unsigned int) R_RX_max);
305 cache_ptr->howto = rx_elf_howto_table + r_type;
309 get_symbol_value (const char * name,
310 bfd_reloc_status_type * status,
311 struct bfd_link_info * info,
313 asection * input_section,
317 struct bfd_link_hash_entry * h;
319 h = bfd_link_hash_lookup (info->hash, name, FALSE, FALSE, TRUE);
322 || (h->type != bfd_link_hash_defined
323 && h->type != bfd_link_hash_defweak))
324 * status = info->callbacks->undefined_symbol
325 (info, name, input_bfd, input_section, offset, TRUE);
327 value = (h->u.def.value
328 + h->u.def.section->output_section->vma
329 + h->u.def.section->output_offset);
335 get_gp (bfd_reloc_status_type * status,
336 struct bfd_link_info * info,
341 static bfd_boolean cached = FALSE;
342 static bfd_vma cached_value = 0;
346 cached_value = get_symbol_value ("__gp", status, info, abfd, sec, offset);
353 get_romstart (bfd_reloc_status_type * status,
354 struct bfd_link_info * info,
359 static bfd_boolean cached = FALSE;
360 static bfd_vma cached_value = 0;
364 cached_value = get_symbol_value ("_start", status, info, abfd, sec, offset);
371 get_ramstart (bfd_reloc_status_type * status,
372 struct bfd_link_info * info,
377 static bfd_boolean cached = FALSE;
378 static bfd_vma cached_value = 0;
382 cached_value = get_symbol_value ("__datastart", status, info, abfd, sec, offset);
388 #define NUM_STACK_ENTRIES 16
389 static int32_t rx_stack [ NUM_STACK_ENTRIES ];
390 static unsigned int rx_stack_top;
392 #define RX_STACK_PUSH(val) \
395 if (rx_stack_top < NUM_STACK_ENTRIES) \
396 rx_stack [rx_stack_top ++] = (val); \
398 r = bfd_reloc_dangerous; \
402 #define RX_STACK_POP(dest) \
405 if (rx_stack_top > 0) \
406 (dest) = rx_stack [-- rx_stack_top]; \
408 (dest) = 0, r = bfd_reloc_dangerous; \
412 /* Relocate an RX ELF section.
413 There is some attempt to make this function usable for many architectures,
414 both USE_REL and USE_RELA ['twould be nice if such a critter existed],
415 if only to serve as a learning tool.
417 The RELOCATE_SECTION function is called by the new ELF backend linker
418 to handle the relocations for a section.
420 The relocs are always passed as Rela structures; if the section
421 actually uses Rel structures, the r_addend field will always be zero.
424 This function is responsible for adjusting the section contents as
425 necessary, and (if using Rela relocs and generating a relocatable
426 output file) adjusting the reloc addend as necessary.
428 This function does not have to worry about setting the reloc
429 address or the reloc symbol index.
431 LOCAL_SYMS is a pointer to the swapped in local symbols.
433 LOCAL_SECTIONS is an array giving the section in the input file
434 corresponding to the st_shndx field of each local symbol.
436 The global hash table entry for the global symbols can be found
437 via elf_sym_hashes (input_bfd).
439 When generating relocatable output, this function must handle
440 STB_LOCAL/STT_SECTION symbols specially. The output symbol is
441 going to be the section symbol corresponding to the output
442 section, which means that the addend must be adjusted accordingly. */
446 rx_elf_relocate_section
448 struct bfd_link_info * info,
450 asection * input_section,
452 Elf_Internal_Rela * relocs,
453 Elf_Internal_Sym * local_syms,
454 asection ** local_sections)
456 Elf_Internal_Shdr * symtab_hdr;
457 struct elf_link_hash_entry ** sym_hashes;
458 Elf_Internal_Rela * rel;
459 Elf_Internal_Rela * relend;
461 symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;
462 sym_hashes = elf_sym_hashes (input_bfd);
463 relend = relocs + input_section->reloc_count;
464 for (rel = relocs; rel < relend; rel ++)
466 reloc_howto_type * howto;
467 unsigned long r_symndx;
468 Elf_Internal_Sym * sym;
470 struct elf_link_hash_entry * h;
472 bfd_reloc_status_type r;
473 const char * name = NULL;
474 bfd_boolean unresolved_reloc = TRUE;
477 r_type = ELF32_R_TYPE (rel->r_info);
478 r_symndx = ELF32_R_SYM (rel->r_info);
480 howto = rx_elf_howto_table + ELF32_R_TYPE (rel->r_info);
486 if (r_symndx < symtab_hdr->sh_info)
488 sym = local_syms + r_symndx;
489 sec = local_sections [r_symndx];
490 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, & sec, rel);
492 name = bfd_elf_string_from_elf_section
493 (input_bfd, symtab_hdr->sh_link, sym->st_name);
494 name = (sym->st_name == 0) ? bfd_section_name (input_bfd, sec) : name;
500 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
501 r_symndx, symtab_hdr, sym_hashes, h,
502 sec, relocation, unresolved_reloc,
505 name = h->root.root.string;
508 if (sec != NULL && elf_discarded_section (sec))
509 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
510 rel, relend, howto, contents);
512 if (info->relocatable)
514 /* This is a relocatable link. We don't have to change
515 anything, unless the reloc is against a section symbol,
516 in which case we have to adjust according to where the
517 section symbol winds up in the output section. */
518 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
519 rel->r_addend += sec->output_offset;
523 if (h != NULL && h->root.type == bfd_link_hash_undefweak)
524 /* If the symbol is undefined and weak
525 then the relocation resolves to zero. */
529 if (howto->pc_relative)
531 relocation -= (input_section->output_section->vma
532 + input_section->output_offset
534 if (r_type != R_RX_RH_3_PCREL
535 && r_type != R_RX_DIR3U_PCREL)
539 relocation += rel->r_addend;
544 #define RANGE(a,b) if (a > (long) relocation || (long) relocation > b) r = bfd_reloc_overflow
545 #define ALIGN(m) if (relocation & m) r = bfd_reloc_other;
546 #define OP(i) (contents[rel->r_offset + (i)])
547 #define WARN_REDHAT(type) \
548 _bfd_error_handler (_("%B:%A: Warning: deprecated Red Hat reloc " type " detected against: %s."), \
549 input_bfd, input_section, name)
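/* A quick example of how the helpers above are used in the cases below:
   RANGE (-32768, 32767) flags bfd_reloc_overflow when the computed value
   does not fit a signed 16-bit field, ALIGN (1) flags bfd_reloc_other
   (reported later as an unaligned small-data access) when the low bit is
   set, and OP (1) = relocation >> 8 stores the second byte of the value
   at contents[rel->r_offset + 1].  */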
551 /* Opcode relocs are always big endian. Data relocs are bi-endian. */
560 case R_RX_RH_3_PCREL:
561 WARN_REDHAT ("RX_RH_3_PCREL");
564 OP (0) |= relocation & 0x07;
568 WARN_REDHAT ("RX_RH_8_NEG");
569 relocation = - relocation;
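/* Fall through.  */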
570 case R_RX_DIR8S_PCREL:
586 WARN_REDHAT ("RX_RH_16_NEG");
587 relocation = - relocation;
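/* Fall through.  */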
588 case R_RX_DIR16S_PCREL:
589 RANGE (-32768, 32767);
590 #if RX_OPCODE_BIG_ENDIAN
593 OP (1) = relocation >> 8;
598 WARN_REDHAT ("RX_RH_16_OP");
599 RANGE (-32768, 32767);
600 #if RX_OPCODE_BIG_ENDIAN
602 OP (0) = relocation >> 8;
605 OP (1) = relocation >> 8;
610 RANGE (-32768, 65535);
611 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
614 OP (0) = relocation >> 8;
619 OP (1) = relocation >> 8;
625 #if RX_OPCODE_BIG_ENDIAN
627 OP (0) = relocation >> 8;
630 OP (1) = relocation >> 8;
635 RANGE (-32768, 65535);
636 #if RX_OPCODE_BIG_ENDIAN
638 OP (0) = relocation >> 8;
641 OP (1) = relocation >> 8;
646 RANGE (-32768, 65535);
647 #if RX_OPCODE_BIG_ENDIAN
649 OP (1) = relocation >> 8;
652 OP (0) = relocation >> 8;
656 case R_RX_DIR3U_PCREL:
659 OP (0) |= relocation & 0x07;
663 WARN_REDHAT ("RX_RH_24_NEG");
664 relocation = - relocation;
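/* Fall through.  */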
665 case R_RX_DIR24S_PCREL:
666 RANGE (-0x800000, 0x7fffff);
667 #if RX_OPCODE_BIG_ENDIAN
669 OP (1) = relocation >> 8;
670 OP (0) = relocation >> 16;
673 OP (1) = relocation >> 8;
674 OP (2) = relocation >> 16;
679 WARN_REDHAT ("RX_RH_24_OP");
680 RANGE (-0x800000, 0x7fffff);
681 #if RX_OPCODE_BIG_ENDIAN
683 OP (1) = relocation >> 8;
684 OP (0) = relocation >> 16;
687 OP (1) = relocation >> 8;
688 OP (2) = relocation >> 16;
693 RANGE (-0x800000, 0x7fffff);
694 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
697 OP (1) = relocation >> 8;
698 OP (0) = relocation >> 16;
703 OP (1) = relocation >> 8;
704 OP (2) = relocation >> 16;
709 WARN_REDHAT ("RX_RH_24_UNS");
711 #if RX_OPCODE_BIG_ENDIAN
713 OP (1) = relocation >> 8;
714 OP (0) = relocation >> 16;
717 OP (1) = relocation >> 8;
718 OP (2) = relocation >> 16;
723 WARN_REDHAT ("RX_RH_32_NEG");
724 relocation = - relocation;
725 #if RX_OPCODE_BIG_ENDIAN
727 OP (2) = relocation >> 8;
728 OP (1) = relocation >> 16;
729 OP (0) = relocation >> 24;
732 OP (1) = relocation >> 8;
733 OP (2) = relocation >> 16;
734 OP (3) = relocation >> 24;
739 WARN_REDHAT ("RX_RH_32_OP");
740 #if RX_OPCODE_BIG_ENDIAN
742 OP (2) = relocation >> 8;
743 OP (1) = relocation >> 16;
744 OP (0) = relocation >> 24;
747 OP (1) = relocation >> 8;
748 OP (2) = relocation >> 16;
749 OP (3) = relocation >> 24;
754 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
757 OP (2) = relocation >> 8;
758 OP (1) = relocation >> 16;
759 OP (0) = relocation >> 24;
764 OP (1) = relocation >> 8;
765 OP (2) = relocation >> 16;
766 OP (3) = relocation >> 24;
771 if (BIGE (output_bfd))
774 OP (1) = relocation >> 8;
775 OP (2) = relocation >> 16;
776 OP (3) = relocation >> 24;
781 OP (2) = relocation >> 8;
782 OP (1) = relocation >> 16;
783 OP (0) = relocation >> 24;
790 WARN_REDHAT ("RX_RH_DIFF");
791 val = bfd_get_32 (output_bfd, & OP (0));
793 bfd_put_32 (output_bfd, val, & OP (0));
798 WARN_REDHAT ("RX_RH_GPRELB");
799 relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);
801 #if RX_OPCODE_BIG_ENDIAN
803 OP (0) = relocation >> 8;
806 OP (1) = relocation >> 8;
811 WARN_REDHAT ("RX_RH_GPRELW");
812 relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);
816 #if RX_OPCODE_BIG_ENDIAN
818 OP (0) = relocation >> 8;
821 OP (1) = relocation >> 8;
826 WARN_REDHAT ("RX_RH_GPRELL");
827 relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);
831 #if RX_OPCODE_BIG_ENDIAN
833 OP (0) = relocation >> 8;
836 OP (1) = relocation >> 8;
840 /* Internal relocations just for relaxation: */
841 case R_RX_RH_ABS5p5B:
842 RX_STACK_POP (relocation);
845 OP (0) |= relocation >> 2;
847 OP (1) |= (relocation << 6) & 0x80;
848 OP (1) |= (relocation << 3) & 0x08;
851 case R_RX_RH_ABS5p5W:
852 RX_STACK_POP (relocation);
857 OP (0) |= relocation >> 2;
859 OP (1) |= (relocation << 6) & 0x80;
860 OP (1) |= (relocation << 3) & 0x08;
863 case R_RX_RH_ABS5p5L:
864 RX_STACK_POP (relocation);
869 OP (0) |= relocation >> 2;
871 OP (1) |= (relocation << 6) & 0x80;
872 OP (1) |= (relocation << 3) & 0x08;
875 case R_RX_RH_ABS5p8B:
876 RX_STACK_POP (relocation);
879 OP (0) |= (relocation << 3) & 0x80;
880 OP (0) |= relocation & 0x0f;
883 case R_RX_RH_ABS5p8W:
884 RX_STACK_POP (relocation);
889 OP (0) |= (relocation << 3) & 0x80;
890 OP (0) |= relocation & 0x0f;
893 case R_RX_RH_ABS5p8L:
894 RX_STACK_POP (relocation);
899 OP (0) |= (relocation << 3) & 0x80;
900 OP (0) |= relocation & 0x0f;
903 case R_RX_RH_UIMM4p8:
906 OP (0) |= relocation << 4;
909 case R_RX_RH_UNEG4p8:
912 OP (0) |= (-relocation) << 4;
915 /* Complex reloc handling: */
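/* A worked example of the stack machine implemented by RX_STACK_PUSH /
   RX_STACK_POP above: a link-time expression such as "foo - bar" stored
   into a 32-bit field would typically arrive as the sequence
   R_RX_SYM (foo), R_RX_SYM (bar), R_RX_OPsub, R_RX_ABS32 - the two SYM
   relocs push the symbol values, OPsub pops both and pushes their
   difference, and the final ABS32 pops the result into the field.
   (Illustrative only; the exact sequence is whatever the assembler
   emits.)  */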
918 RX_STACK_POP (relocation);
919 #if RX_OPCODE_BIG_ENDIAN
921 OP (2) = relocation >> 8;
922 OP (1) = relocation >> 16;
923 OP (0) = relocation >> 24;
926 OP (1) = relocation >> 8;
927 OP (2) = relocation >> 16;
928 OP (3) = relocation >> 24;
933 RX_STACK_POP (relocation);
934 #if RX_OPCODE_BIG_ENDIAN
936 OP (1) = relocation >> 8;
937 OP (2) = relocation >> 16;
938 OP (3) = relocation >> 24;
941 OP (2) = relocation >> 8;
942 OP (1) = relocation >> 16;
943 OP (0) = relocation >> 24;
947 case R_RX_ABS24S_PCREL:
949 RX_STACK_POP (relocation);
950 RANGE (-0x800000, 0x7fffff);
951 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
954 OP (1) = relocation >> 8;
955 OP (0) = relocation >> 16;
960 OP (1) = relocation >> 8;
961 OP (2) = relocation >> 16;
966 RX_STACK_POP (relocation);
967 RANGE (-32768, 65535);
968 #if RX_OPCODE_BIG_ENDIAN
970 OP (0) = relocation >> 8;
973 OP (1) = relocation >> 8;
978 RX_STACK_POP (relocation);
979 RANGE (-32768, 65535);
980 #if RX_OPCODE_BIG_ENDIAN
982 OP (1) = relocation >> 8;
985 OP (0) = relocation >> 8;
989 case R_RX_ABS16S_PCREL:
991 RX_STACK_POP (relocation);
992 RANGE (-32768, 32767);
993 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
996 OP (0) = relocation >> 8;
1000 OP (0) = relocation;
1001 OP (1) = relocation >> 8;
1006 RX_STACK_POP (relocation);
1008 #if RX_OPCODE_BIG_ENDIAN
1009 OP (1) = relocation;
1010 OP (0) = relocation >> 8;
1012 OP (0) = relocation;
1013 OP (1) = relocation >> 8;
1018 RX_STACK_POP (relocation);
1021 #if RX_OPCODE_BIG_ENDIAN
1022 OP (1) = relocation;
1023 OP (0) = relocation >> 8;
1025 OP (0) = relocation;
1026 OP (1) = relocation >> 8;
1031 RX_STACK_POP (relocation);
1034 #if RX_OPCODE_BIG_ENDIAN
1035 OP (1) = relocation;
1036 OP (0) = relocation >> 8;
1038 OP (0) = relocation;
1039 OP (1) = relocation >> 8;
1044 RX_STACK_POP (relocation);
1046 OP (0) = relocation;
1050 RX_STACK_POP (relocation);
1052 OP (0) = relocation;
1056 RX_STACK_POP (relocation);
1059 OP (0) = relocation;
1063 RX_STACK_POP (relocation);
1066 OP (0) = relocation;
1069 case R_RX_ABS8S_PCREL:
1071 RX_STACK_POP (relocation);
1073 OP (0) = relocation;
1077 if (r_symndx < symtab_hdr->sh_info)
1078 RX_STACK_PUSH (sec->output_section->vma
1079 + sec->output_offset
1084 && (h->root.type == bfd_link_hash_defined
1085 || h->root.type == bfd_link_hash_defweak))
1086 RX_STACK_PUSH (h->root.u.def.value
1087 + sec->output_section->vma
1088 + sec->output_offset);
1090 _bfd_error_handler (_("Warning: RX_SYM reloc with an unknown symbol"));
1100 RX_STACK_PUSH (tmp);
1108 RX_STACK_POP (tmp1);
1109 RX_STACK_POP (tmp2);
1111 RX_STACK_PUSH (tmp1);
1119 RX_STACK_POP (tmp1);
1120 RX_STACK_POP (tmp2);
1122 RX_STACK_PUSH (tmp2);
1130 RX_STACK_POP (tmp1);
1131 RX_STACK_POP (tmp2);
1133 RX_STACK_PUSH (tmp1);
1141 RX_STACK_POP (tmp1);
1142 RX_STACK_POP (tmp2);
1144 RX_STACK_PUSH (tmp1);
1152 RX_STACK_POP (tmp1);
1153 RX_STACK_POP (tmp2);
1155 RX_STACK_PUSH (tmp1);
1163 RX_STACK_POP (tmp1);
1164 RX_STACK_POP (tmp2);
1166 RX_STACK_PUSH (tmp1);
1170 case R_RX_OPsctsize:
1171 RX_STACK_PUSH (input_section->size);
1175 RX_STACK_PUSH (input_section->output_section->vma);
1182 RX_STACK_POP (tmp1);
1183 RX_STACK_POP (tmp2);
1185 RX_STACK_PUSH (tmp1);
1193 RX_STACK_POP (tmp1);
1194 RX_STACK_POP (tmp2);
1196 RX_STACK_PUSH (tmp1);
1204 RX_STACK_POP (tmp1);
1205 RX_STACK_POP (tmp2);
1207 RX_STACK_PUSH (tmp1);
1217 RX_STACK_PUSH (tmp);
1225 RX_STACK_POP (tmp1);
1226 RX_STACK_POP (tmp2);
1228 RX_STACK_PUSH (tmp1);
1233 RX_STACK_PUSH (get_romstart (&r, info, input_bfd, input_section, rel->r_offset));
1237 RX_STACK_PUSH (get_ramstart (&r, info, input_bfd, input_section, rel->r_offset));
1241 r = bfd_reloc_notsupported;
1245 if (r != bfd_reloc_ok)
1247 const char * msg = NULL;
1251 case bfd_reloc_overflow:
1252 /* Catch the case of a missing function declaration
1253 and emit a more helpful error message. */
1254 if (r_type == R_RX_DIR24S_PCREL)
1255 msg = _("%B(%A): error: call to undefined function '%s'");
1257 r = info->callbacks->reloc_overflow
1258 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
1259 input_bfd, input_section, rel->r_offset);
1262 case bfd_reloc_undefined:
1263 r = info->callbacks->undefined_symbol
1264 (info, name, input_bfd, input_section, rel->r_offset,
1268 case bfd_reloc_other:
1269 msg = _("%B(%A): warning: unaligned access to symbol '%s' in the small data area");
1272 case bfd_reloc_outofrange:
1273 msg = _("%B(%A): internal error: out of range error");
1276 case bfd_reloc_notsupported:
1277 msg = _("%B(%A): internal error: unsupported relocation error");
1280 case bfd_reloc_dangerous:
1281 msg = _("%B(%A): internal error: dangerous relocation");
1285 msg = _("%B(%A): internal error: unknown error");
1290 _bfd_error_handler (msg, input_bfd, input_section, name);
1300 /* Relaxation Support. */
1302 /* Progression of relocations from largest operand size to smallest. */
1306 next_smaller_reloc (int r)
1310 case R_RX_DIR32: return R_RX_DIR24S;
1311 case R_RX_DIR24S: return R_RX_DIR16S;
1312 case R_RX_DIR16S: return R_RX_DIR8S;
1313 case R_RX_DIR8S: return R_RX_NONE;
1315 case R_RX_DIR16: return R_RX_DIR8;
1316 case R_RX_DIR8: return R_RX_NONE;
1318 case R_RX_DIR16U: return R_RX_DIR8U;
1319 case R_RX_DIR8U: return R_RX_NONE;
1321 case R_RX_DIR24S_PCREL: return R_RX_DIR16S_PCREL;
1322 case R_RX_DIR16S_PCREL: return R_RX_DIR8S_PCREL;
1323 case R_RX_DIR8S_PCREL: return R_RX_DIR3U_PCREL;
1325 case R_RX_DIR16UL: return R_RX_DIR8UL;
1326 case R_RX_DIR8UL: return R_RX_NONE;
1327 case R_RX_DIR16UW: return R_RX_DIR8UW;
1328 case R_RX_DIR8UW: return R_RX_NONE;
1330 case R_RX_RH_32_OP: return R_RX_RH_24_OP;
1331 case R_RX_RH_24_OP: return R_RX_RH_16_OP;
1332 case R_RX_RH_16_OP: return R_RX_DIR8;
1334 case R_RX_ABS32: return R_RX_ABS24S;
1335 case R_RX_ABS24S: return R_RX_ABS16S;
1336 case R_RX_ABS16: return R_RX_ABS8;
1337 case R_RX_ABS16U: return R_RX_ABS8U;
1338 case R_RX_ABS16S: return R_RX_ABS8S;
1339 case R_RX_ABS8: return R_RX_NONE;
1340 case R_RX_ABS8U: return R_RX_NONE;
1341 case R_RX_ABS8S: return R_RX_NONE;
1342 case R_RX_ABS24S_PCREL: return R_RX_ABS16S_PCREL;
1343 case R_RX_ABS16S_PCREL: return R_RX_ABS8S_PCREL;
1344 case R_RX_ABS8S_PCREL: return R_RX_NONE;
1345 case R_RX_ABS16UL: return R_RX_ABS8UL;
1346 case R_RX_ABS16UW: return R_RX_ABS8UW;
1347 case R_RX_ABS8UL: return R_RX_NONE;
1348 case R_RX_ABS8UW: return R_RX_NONE;
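/* For example, a relaxable 32-bit operand steps down
   R_RX_DIR32 -> R_RX_DIR24S -> R_RX_DIR16S -> R_RX_DIR8S -> R_RX_NONE,
   while the PC-relative chain ends at R_RX_DIR3U_PCREL instead of
   R_RX_NONE.  */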
1353 /* Delete some bytes from a section while relaxing. */
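/* Roughly: COUNT bytes at ADDR are removed, the rest of the section up
   to the next alignment boundary (or the end of the section) is shifted
   down, and any reloc offsets, local symbol values/sizes and global
   symbol values/sizes that fall inside the moved region are adjusted to
   match.  When ALIGNMENT_REL bounds the region, the freed space is
   padded with NOP bytes (0x03) instead of shrinking the section.  */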
1356 elf32_rx_relax_delete_bytes (bfd *abfd, asection *sec, bfd_vma addr, int count,
1357 Elf_Internal_Rela *alignment_rel, int force_snip)
1359 Elf_Internal_Shdr * symtab_hdr;
1360 unsigned int sec_shndx;
1361 bfd_byte * contents;
1362 Elf_Internal_Rela * irel;
1363 Elf_Internal_Rela * irelend;
1364 Elf_Internal_Sym * isym;
1365 Elf_Internal_Sym * isymend;
1367 unsigned int symcount;
1368 struct elf_link_hash_entry ** sym_hashes;
1369 struct elf_link_hash_entry ** end_hashes;
1374 sec_shndx = _bfd_elf_section_from_bfd_section (abfd, sec);
1376 contents = elf_section_data (sec)->this_hdr.contents;
1378 /* The deletion must stop at the next alignment boundary, if
1379 ALIGNMENT_REL is non-NULL. */
1382 toaddr = alignment_rel->r_offset;
1384 irel = elf_section_data (sec)->relocs;
1385 irelend = irel + sec->reloc_count;
1387 /* Actually delete the bytes. */
1388 memmove (contents + addr, contents + addr + count,
1389 (size_t) (toaddr - addr - count));
1391 /* If we don't have an alignment marker to worry about, we can just
1392 shrink the section. Otherwise, we have to fill in the newly
1393 created gap with NOP insns (0x03). */
1397 memset (contents + toaddr - count, 0x03, count);
1399 /* Adjust all the relocs. */
1400 for (irel = elf_section_data (sec)->relocs; irel < irelend; irel++)
1402 /* Get the new reloc address. */
1403 if (irel->r_offset > addr
1404 && (irel->r_offset < toaddr
1405 || (force_snip && irel->r_offset == toaddr)))
1406 irel->r_offset -= count;
1408 /* If we see an ALIGN marker at the end of the gap, we move it
1409 to the beginning of the gap, since marking these gaps is what they're for. */
1411 if (irel->r_offset == toaddr
1412 && ELF32_R_TYPE (irel->r_info) == R_RX_RH_RELAX
1413 && irel->r_addend & RX_RELAXA_ALIGN)
1414 irel->r_offset -= count;
1417 /* Adjust the local symbols defined in this section. */
1418 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
1419 isym = (Elf_Internal_Sym *) symtab_hdr->contents;
1420 isymend = isym + symtab_hdr->sh_info;
1422 for (; isym < isymend; isym++)
1424 /* If the symbol is in the range of memory we just moved, we
1425 have to adjust its value. */
1426 if (isym->st_shndx == sec_shndx
1427 && isym->st_value > addr
1428 && isym->st_value < toaddr)
1429 isym->st_value -= count;
1431 /* If the symbol *spans* the bytes we just deleted (i.e. its
1432 *end* is in the moved bytes but its *start* isn't), then we
1433 must adjust its size. */
1434 if (isym->st_shndx == sec_shndx
1435 && isym->st_value < addr
1436 && isym->st_value + isym->st_size > addr
1437 && isym->st_value + isym->st_size < toaddr)
1438 isym->st_size -= count;
1441 /* Now adjust the global symbols defined in this section. */
1442 symcount = (symtab_hdr->sh_size / sizeof (Elf32_External_Sym)
1443 - symtab_hdr->sh_info);
1444 sym_hashes = elf_sym_hashes (abfd);
1445 end_hashes = sym_hashes + symcount;
1447 for (; sym_hashes < end_hashes; sym_hashes++)
1449 struct elf_link_hash_entry *sym_hash = *sym_hashes;
1451 if ((sym_hash->root.type == bfd_link_hash_defined
1452 || sym_hash->root.type == bfd_link_hash_defweak)
1453 && sym_hash->root.u.def.section == sec)
1455 /* As above, adjust the value if needed. */
1456 if (sym_hash->root.u.def.value > addr
1457 && sym_hash->root.u.def.value < toaddr)
1458 sym_hash->root.u.def.value -= count;
1460 /* As above, adjust the size if needed. */
1461 if (sym_hash->root.u.def.value < addr
1462 && sym_hash->root.u.def.value + sym_hash->size > addr
1463 && sym_hash->root.u.def.value + sym_hash->size < toaddr)
1464 sym_hash->size -= count;
1471 /* Used to sort relocs by address. If relocs have the same address,
1472 we maintain their relative order, except that R_RX_RH_RELAX
1473 alignment relocs must be the first reloc for any given address. */
1476 reloc_bubblesort (Elf_Internal_Rela * r, int count)
1480 bfd_boolean swappit;
1482 /* This is almost a classic bubblesort. It's the slowest sort, but
1483 we're taking advantage of the fact that the relocations are
1484 mostly in order already (the assembler emits them that way) and
1485 we need relocs with the same address to remain in the same relative order. */
1491 for (i = 0; i < count - 1; i ++)
1493 if (r[i].r_offset > r[i + 1].r_offset)
1495 else if (r[i].r_offset < r[i + 1].r_offset)
1497 else if (ELF32_R_TYPE (r[i + 1].r_info) == R_RX_RH_RELAX
1498 && (r[i + 1].r_addend & RX_RELAXA_ALIGN))
1500 else if (ELF32_R_TYPE (r[i + 1].r_info) == R_RX_RH_RELAX
1501 && (r[i + 1].r_addend & RX_RELAXA_ELIGN)
1502 && !(ELF32_R_TYPE (r[i].r_info) == R_RX_RH_RELAX
1503 && (r[i].r_addend & RX_RELAXA_ALIGN)))
1510 Elf_Internal_Rela tmp;
1515 /* If we do move a reloc back, re-scan to see if it
1516 needs to be moved even further back. This avoids
1517 most of the O(n^2) behavior for our cases. */
1527 #define OFFSET_FOR_RELOC(rel, lrel, scale) \
1528 rx_offset_for_reloc (abfd, rel + 1, symtab_hdr, shndx_buf, intsyms, \
1529 lrel, abfd, sec, link_info, scale)
1532 rx_offset_for_reloc (bfd * abfd,
1533 Elf_Internal_Rela * rel,
1534 Elf_Internal_Shdr * symtab_hdr,
1535 Elf_External_Sym_Shndx * shndx_buf ATTRIBUTE_UNUSED,
1536 Elf_Internal_Sym * intsyms,
1537 Elf_Internal_Rela ** lrel,
1539 asection * input_section,
1540 struct bfd_link_info * info,
1544 bfd_reloc_status_type r;
1548 /* REL is the first of 1..N relocations. We compute the symbol
1549 value for each relocation, then combine them if needed. LREL
1550 gets a pointer to the last relocation used. */
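/* For instance, when the operand is the complex expression "foo - bar",
   REL points at the first R_RX_SYM; the relocs that follow
   (R_RX_SYM (bar), R_RX_OPsub) are evaluated on the same stack used by
   relocate_section until a final R_RX_ABS* reloc yields the operand
   value, and *LREL is left pointing at that last reloc.  (A sketch of
   the common case only.)  */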
1555 /* Get the value of the symbol referred to by the reloc. */
1556 if (ELF32_R_SYM (rel->r_info) < symtab_hdr->sh_info)
1558 /* A local symbol. */
1559 Elf_Internal_Sym *isym;
1562 isym = intsyms + ELF32_R_SYM (rel->r_info);
1564 if (isym->st_shndx == SHN_UNDEF)
1565 ssec = bfd_und_section_ptr;
1566 else if (isym->st_shndx == SHN_ABS)
1567 ssec = bfd_abs_section_ptr;
1568 else if (isym->st_shndx == SHN_COMMON)
1569 ssec = bfd_com_section_ptr;
1571 ssec = bfd_section_from_elf_index (abfd,
1574 /* Initial symbol value. */
1575 symval = isym->st_value;
1577 /* GAS may have made this symbol relative to a section, in
1578 which case, we have to add the addend to find the symbol. */
1580 if (ELF_ST_TYPE (isym->st_info) == STT_SECTION)
1581 symval += rel->r_addend;
1585 if ((ssec->flags & SEC_MERGE)
1586 && ssec->sec_info_type == ELF_INFO_TYPE_MERGE)
1587 symval = _bfd_merged_section_offset (abfd, & ssec,
1588 elf_section_data (ssec)->sec_info,
1592 /* Now make the offset relative to where the linker is putting it. */
1595 ssec->output_section->vma + ssec->output_offset;
1597 symval += rel->r_addend;
1602 struct elf_link_hash_entry * h;
1604 /* An external symbol. */
1605 indx = ELF32_R_SYM (rel->r_info) - symtab_hdr->sh_info;
1606 h = elf_sym_hashes (abfd)[indx];
1607 BFD_ASSERT (h != NULL);
1609 if (h->root.type != bfd_link_hash_defined
1610 && h->root.type != bfd_link_hash_defweak)
1612 /* This appears to be a reference to an undefined
1613 symbol. Just ignore it--it will be caught by the
1614 regular reloc processing. */
1620 symval = (h->root.u.def.value
1621 + h->root.u.def.section->output_section->vma
1622 + h->root.u.def.section->output_offset);
1624 symval += rel->r_addend;
1627 switch (ELF32_R_TYPE (rel->r_info))
1630 RX_STACK_PUSH (symval);
1634 RX_STACK_POP (tmp1);
1636 RX_STACK_PUSH (tmp1);
1640 RX_STACK_POP (tmp1);
1641 RX_STACK_POP (tmp2);
1643 RX_STACK_PUSH (tmp1);
1647 RX_STACK_POP (tmp1);
1648 RX_STACK_POP (tmp2);
1650 RX_STACK_PUSH (tmp2);
1654 RX_STACK_POP (tmp1);
1655 RX_STACK_POP (tmp2);
1657 RX_STACK_PUSH (tmp1);
1661 RX_STACK_POP (tmp1);
1662 RX_STACK_POP (tmp2);
1664 RX_STACK_PUSH (tmp1);
1668 RX_STACK_POP (tmp1);
1669 RX_STACK_POP (tmp2);
1671 RX_STACK_PUSH (tmp1);
1675 RX_STACK_POP (tmp1);
1676 RX_STACK_POP (tmp2);
1678 RX_STACK_PUSH (tmp1);
1681 case R_RX_OPsctsize:
1682 RX_STACK_PUSH (input_section->size);
1686 RX_STACK_PUSH (input_section->output_section->vma);
1690 RX_STACK_POP (tmp1);
1691 RX_STACK_POP (tmp2);
1693 RX_STACK_PUSH (tmp1);
1697 RX_STACK_POP (tmp1);
1698 RX_STACK_POP (tmp2);
1700 RX_STACK_PUSH (tmp1);
1704 RX_STACK_POP (tmp1);
1705 RX_STACK_POP (tmp2);
1707 RX_STACK_PUSH (tmp1);
1711 RX_STACK_POP (tmp1);
1713 RX_STACK_PUSH (tmp1);
1717 RX_STACK_POP (tmp1);
1718 RX_STACK_POP (tmp2);
1720 RX_STACK_PUSH (tmp1);
1724 RX_STACK_PUSH (get_romstart (&r, info, input_bfd, input_section, rel->r_offset));
1728 RX_STACK_PUSH (get_ramstart (&r, info, input_bfd, input_section, rel->r_offset));
1736 RX_STACK_POP (symval);
1747 RX_STACK_POP (symval);
1755 RX_STACK_POP (symval);
1766 move_reloc (Elf_Internal_Rela * irel, Elf_Internal_Rela * srel, int delta)
1768 bfd_vma old_offset = srel->r_offset;
1771 while (irel <= srel)
1773 if (irel->r_offset == old_offset)
1774 irel->r_offset += delta;
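/* Note that every reloc sharing OLD_OFFSET moves, not just SREL itself,
   so e.g. an R_RX_RH_RELAX marker stays attached to the operand reloc it
   describes when that operand is shifted by DELTA bytes.  */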
1779 /* Relax one section. */
1782 elf32_rx_relax_section (bfd * abfd,
1784 struct bfd_link_info * link_info,
1785 bfd_boolean * again,
1786 bfd_boolean allow_pcrel3)
1788 Elf_Internal_Shdr * symtab_hdr;
1789 Elf_Internal_Shdr * shndx_hdr;
1790 Elf_Internal_Rela * internal_relocs;
1791 Elf_Internal_Rela * free_relocs = NULL;
1792 Elf_Internal_Rela * irel;
1793 Elf_Internal_Rela * srel;
1794 Elf_Internal_Rela * irelend;
1795 Elf_Internal_Rela * next_alignment;
1796 Elf_Internal_Rela * prev_alignment;
1797 bfd_byte * contents = NULL;
1798 bfd_byte * free_contents = NULL;
1799 Elf_Internal_Sym * intsyms = NULL;
1800 Elf_Internal_Sym * free_intsyms = NULL;
1801 Elf_External_Sym_Shndx * shndx_buf = NULL;
1807 int section_alignment_glue;
1808 /* How much to scale the relocation by - 1, 2, or 4. */
1811 /* Assume nothing changes. */
1814 /* We don't have to do anything for a relocatable link, if
1815 this section does not have relocs, or if this is not a code section. */
1817 if (link_info->relocatable
1818 || (sec->flags & SEC_RELOC) == 0
1819 || sec->reloc_count == 0
1820 || (sec->flags & SEC_CODE) == 0)
1823 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
1824 shndx_hdr = &elf_tdata (abfd)->symtab_shndx_hdr;
1826 sec_start = sec->output_section->vma + sec->output_offset;
1828 /* Get the section contents. */
1829 if (elf_section_data (sec)->this_hdr.contents != NULL)
1830 contents = elf_section_data (sec)->this_hdr.contents;
1831 /* Go get them off disk. */
1834 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
1836 elf_section_data (sec)->this_hdr.contents = contents;
1839 /* Read this BFD's symbols. */
1840 /* Get cached copy if it exists. */
1841 if (symtab_hdr->contents != NULL)
1842 intsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
1845 intsyms = bfd_elf_get_elf_syms (abfd, symtab_hdr, symtab_hdr->sh_info, 0, NULL, NULL, NULL);
1846 symtab_hdr->contents = (bfd_byte *) intsyms;
1849 if (shndx_hdr->sh_size != 0)
1853 amt = symtab_hdr->sh_info;
1854 amt *= sizeof (Elf_External_Sym_Shndx);
1855 shndx_buf = (Elf_External_Sym_Shndx *) bfd_malloc (amt);
1856 if (shndx_buf == NULL)
1858 if (bfd_seek (abfd, shndx_hdr->sh_offset, SEEK_SET) != 0
1859 || bfd_bread ((PTR) shndx_buf, amt, abfd) != amt)
1861 shndx_hdr->contents = (bfd_byte *) shndx_buf;
1864 /* Get a copy of the native relocations. */
1865 internal_relocs = (_bfd_elf_link_read_relocs
1866 (abfd, sec, (PTR) NULL, (Elf_Internal_Rela *) NULL,
1867 link_info->keep_memory));
1868 if (internal_relocs == NULL)
1870 if (! link_info->keep_memory)
1871 free_relocs = internal_relocs;
1873 /* The RL_ relocs must be just before the operand relocs they go
1874 with, so we must sort them to guarantee this. We use bubblesort
1875 instead of qsort so we can guarantee that relocs with the same
1876 address remain in the same relative order. */
1877 reloc_bubblesort (internal_relocs, sec->reloc_count);
1879 /* Walk through them looking for relaxing opportunities. */
1880 irelend = internal_relocs + sec->reloc_count;
1882 /* This will either be NULL or a pointer to the next alignment reloc. */
1884 next_alignment = internal_relocs;
1885 /* This will be the previous alignment, although at first it points
1886 to the first real relocation. */
1887 prev_alignment = internal_relocs;
1889 /* We calculate worst case shrinkage caused by alignment directives.
1890 Not fool-proof, but better than either ignoring the problem or
1891 doing heavy duty analysis of all the alignment markers in all input sections. */
1893 section_alignment_glue = 0;
1894 for (irel = internal_relocs; irel < irelend; irel++)
1895 if (ELF32_R_TYPE (irel->r_info) == R_RX_RH_RELAX
1896 && irel->r_addend & RX_RELAXA_ALIGN)
1898 int this_glue = 1 << (irel->r_addend & RX_RELAXA_ANUM);
1900 if (section_alignment_glue < this_glue)
1901 section_alignment_glue = this_glue;
1903 /* Worst case is all 0..N alignments, in order, causing 2*N-1 bytes of glue. */
1905 section_alignment_glue *= 2;
1907 for (irel = internal_relocs; irel < irelend; irel++)
1909 unsigned char *insn;
1912 /* The insns we care about are all marked with one of these. */
1913 if (ELF32_R_TYPE (irel->r_info) != R_RX_RH_RELAX)
1916 if (irel->r_addend & RX_RELAXA_ALIGN
1917 || next_alignment == internal_relocs)
1919 /* When we delete bytes, we need to maintain all the alignments
1920 indicated. In addition, we need to be careful about relaxing
1921 jumps across alignment boundaries - these displacements
1922 *grow* when we delete bytes. For now, don't shrink
1923 displacements across an alignment boundary, just in case.
1924 Note that this only affects relocations to the same section. */
1926 prev_alignment = next_alignment;
1927 next_alignment += 2;
1928 while (next_alignment < irelend
1929 && (ELF32_R_TYPE (next_alignment->r_info) != R_RX_RH_RELAX
1930 || !(next_alignment->r_addend & RX_RELAXA_ELIGN)))
1932 if (next_alignment >= irelend || next_alignment->r_offset == 0)
1933 next_alignment = NULL;
1936 /* When we hit alignment markers, see if we've shrunk enough
1937 before them to reduce the gap without violating the alignment requirements. */
1939 if (irel->r_addend & RX_RELAXA_ALIGN)
1941 /* At this point, the next relocation *should* be the ELIGN end marker. */
1943 Elf_Internal_Rela *erel = irel + 1;
1944 unsigned int alignment, nbytes;
1946 if (ELF32_R_TYPE (erel->r_info) != R_RX_RH_RELAX)
1948 if (!(erel->r_addend & RX_RELAXA_ELIGN))
1951 alignment = 1 << (irel->r_addend & RX_RELAXA_ANUM);
1953 if (erel->r_offset - irel->r_offset < alignment)
1956 nbytes = erel->r_offset - irel->r_offset;
1957 nbytes /= alignment;
1958 nbytes *= alignment;
1960 elf32_rx_relax_delete_bytes (abfd, sec, erel->r_offset-nbytes, nbytes, next_alignment,
1961 erel->r_offset == sec->size);
1967 if (irel->r_addend & RX_RELAXA_ELIGN)
1970 insn = contents + irel->r_offset;
1972 nrelocs = irel->r_addend & RX_RELAXA_RNUM;
1974 /* At this point, we have an insn that is a candidate for linker
1975 relaxation. There are NRELOCS relocs following that may be
1976 relaxed, although each reloc may be made of more than one
1977 reloc entry (such as gp-rel symbols). */
1979 /* Get the value of the symbol referred to by the reloc. Just
1980 in case this is the last reloc in the list, use the RL's
1981 addend to choose between this reloc (no addend) or the next
1982 (yes addend, which means at least one following reloc). */
1984 /* srel points to the "current" relocation for this insn -
1985 actually the last reloc for a given operand, which is the one
1986 we need to update. We check the relaxations in the same
1987 order that the relocations happen, so we'll just push it along as we go. */
1991 pc = sec->output_section->vma + sec->output_offset
1995 symval = OFFSET_FOR_RELOC (srel, &srel, &scale); \
1996 pcrel = symval - pc + srel->r_addend; \
1999 #define SNIPNR(offset, nbytes) \
2000 elf32_rx_relax_delete_bytes (abfd, sec, (insn - contents) + offset, nbytes, next_alignment, 0);
2001 #define SNIP(offset, nbytes, newtype) \
2002 SNIPNR (offset, nbytes); \
2003 srel->r_info = ELF32_R_INFO (ELF32_R_SYM (srel->r_info), newtype)
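/* So, in the cases below, something like SNIP (3, 1, newrel) deletes one
   byte at offset 3 of the current insn and retypes the operand reloc to
   NEWREL, while SNIPNR only deletes bytes and leaves the reloc type
   alone.  */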
2005 /* The order of these bit tests must match the order that the
2006 relocs appear in. Since we sorted those by offset, we can check them in order. */
2009 /* Note that the numbers in, say, DSP6 are the bit offsets of
2010 the code fields that describe the operand. Bit numbering starts
2011 at 0 with the MSB of insn[0]. */
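/* Roughly: a dsp:16 operand (code field 2) whose scaled value now fits
   in 8 bits is rewritten as dsp:8 with one displacement byte snipped and
   the reloc stepped down a size; a dsp:8 operand that has become zero is
   dropped altogether (R_RX_NONE).  */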
2018 if (irel->r_addend & RX_RELAXA_DSP6)
2023 if (code == 2 && symval/scale <= 255)
2025 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2028 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2029 if (newrel != ELF32_R_TYPE (srel->r_info))
2031 SNIP (3, 1, newrel);
2036 else if (code == 1 && symval == 0)
2039 SNIP (2, 1, R_RX_NONE);
2043 /* Special case DSP:5 format: MOV.bwl dsp:5[Rsrc],Rdst. */
2044 else if (code == 1 && symval/scale <= 31
2045 /* Decodable bits. */
2046 && (insn[0] & 0xcc) == 0xcc
2048 && (insn[0] & 0x30) != 3
2049 /* Register MSBs. */
2050 && (insn[1] & 0x88) == 0x00)
2054 insn[0] = 0x88 | (insn[0] & 0x30);
2055 /* The register fields are in the right place already. */
2057 /* We can't relax this new opcode. */
2060 switch ((insn[0] & 0x30) >> 4)
2063 newrel = R_RX_RH_ABS5p5B;
2066 newrel = R_RX_RH_ABS5p5W;
2069 newrel = R_RX_RH_ABS5p5L;
2073 move_reloc (irel, srel, -2);
2074 SNIP (2, 1, newrel);
2077 /* Special case DSP:5 format: MOVU.bw dsp:5[Rsrc],Rdst. */
2078 else if (code == 1 && symval/scale <= 31
2079 /* Decodable bits. */
2080 && (insn[0] & 0xf8) == 0x58
2081 /* Register MSBs. */
2082 && (insn[1] & 0x88) == 0x00)
2086 insn[0] = 0xb0 | ((insn[0] & 0x04) << 1);
2087 /* The register fields are in the right place already. */
2089 /* We can't relax this new opcode. */
2092 switch ((insn[0] & 0x08) >> 3)
2095 newrel = R_RX_RH_ABS5p5B;
2098 newrel = R_RX_RH_ABS5p5W;
2102 move_reloc (irel, srel, -2);
2103 SNIP (2, 1, newrel);
2107 /* A DSP4 operand always follows a DSP6 operand, even if there's
2108 no relocation for it. We have to read the code out of the
2109 opcode to calculate the offset of the operand. */
2110 if (irel->r_addend & RX_RELAXA_DSP4)
2112 int code6, offset = 0;
2116 code6 = insn[0] & 0x03;
2119 case 0: offset = 2; break;
2120 case 1: offset = 3; break;
2121 case 2: offset = 4; break;
2122 case 3: offset = 2; break;
2125 code = (insn[0] & 0x0c) >> 2;
2127 if (code == 2 && symval / scale <= 255)
2129 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2133 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2134 if (newrel != ELF32_R_TYPE (srel->r_info))
2136 SNIP (offset+1, 1, newrel);
2141 else if (code == 1 && symval == 0)
2144 SNIP (offset, 1, R_RX_NONE);
2147 /* Special case DSP:5 format: MOV.bwl Rsrc,dsp:5[Rdst] */
2148 else if (code == 1 && symval/scale <= 31
2149 /* Decodable bits. */
2150 && (insn[0] & 0xc3) == 0xc3
2152 && (insn[0] & 0x30) != 3
2153 /* Register MSBs. */
2154 && (insn[1] & 0x88) == 0x00)
2158 insn[0] = 0x80 | (insn[0] & 0x30);
2159 /* The register fields are in the right place already. */
2161 /* We can't relax this new opcode. */
2164 switch ((insn[0] & 0x30) >> 4)
2167 newrel = R_RX_RH_ABS5p5B;
2170 newrel = R_RX_RH_ABS5p5W;
2173 newrel = R_RX_RH_ABS5p5L;
2177 move_reloc (irel, srel, -2);
2178 SNIP (2, 1, newrel);
2182 /* These always occur alone, but the offset depends on whether
2183 it's a MEMEX opcode (0x06) or not. */
2184 if (irel->r_addend & RX_RELAXA_DSP14)
2189 if (insn[0] == 0x06)
2196 if (code == 2 && symval / scale <= 255)
2198 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2202 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2203 if (newrel != ELF32_R_TYPE (srel->r_info))
2205 SNIP (offset, 1, newrel);
2209 else if (code == 1 && symval == 0)
2212 SNIP (offset, 1, R_RX_NONE);
2223 /* These always occur alone. */
2224 if (irel->r_addend & RX_RELAXA_IMM6)
2230 /* These relocations sign-extend, so we must do signed compares. */
2231 ssymval = (long) symval;
2233 code = insn[0] & 0x03;
2235 if (code == 0 && ssymval <= 8388607 && ssymval >= -8388608)
2237 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2241 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2242 if (newrel != ELF32_R_TYPE (srel->r_info))
2244 SNIP (2, 1, newrel);
2249 else if (code == 3 && ssymval <= 32767 && ssymval >= -32768)
2251 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2255 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2256 if (newrel != ELF32_R_TYPE (srel->r_info))
2258 SNIP (2, 1, newrel);
2263 /* Special case UIMM8 format: CMP #uimm8,Rdst. */
2264 else if (code == 2 && ssymval <= 255 && ssymval >= 16
2265 /* Decodable bits. */
2266 && (insn[0] & 0xfc) == 0x74
2267 /* Decodable bits. */
2268 && ((insn[1] & 0xf0) == 0x00))
2273 insn[1] = 0x50 | (insn[1] & 0x0f);
2275 /* We can't relax this new opcode. */
2278 if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
2279 newrel = R_RX_ABS8U;
2281 newrel = R_RX_DIR8U;
2283 SNIP (2, 1, newrel);
2287 else if (code == 2 && ssymval <= 127 && ssymval >= -128)
2289 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2293 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2294 if (newrel != ELF32_R_TYPE (srel->r_info))
2296 SNIP (2, 1, newrel);
2301 /* Special case UIMM4 format: CMP, MUL, AND, OR. */
2302 else if (code == 1 && ssymval <= 15 && ssymval >= 0
2303 /* Decodable bits and immediate type. */
2305 /* Decodable bits. */
2306 && (insn[1] & 0xc0) == 0x00)
2308 static const int newop[4] = { 1, 3, 4, 5 };
2310 insn[0] = 0x60 | newop[insn[1] >> 4];
2311 /* The register number doesn't move. */
2313 /* We can't relax this new opcode. */
2316 move_reloc (irel, srel, -1);
2318 SNIP (2, 1, R_RX_RH_UIMM4p8);
2322 /* Special case UIMM4 format: ADD -> ADD/SUB. */
2323 else if (code == 1 && ssymval <= 15 && ssymval >= -15
2324 /* Decodable bits and immediate type. */
2326 /* Same register for source and destination. */
2327 && ((insn[1] >> 4) == (insn[1] & 0x0f)))
2331 /* Note that we can't turn "add $0,Rs" into a NOP
2332 because the flags need to be set right. */
2336 insn[0] = 0x60; /* Subtract. */
2337 newrel = R_RX_RH_UNEG4p8;
2341 insn[0] = 0x62; /* Add. */
2342 newrel = R_RX_RH_UIMM4p8;
2345 /* The register number is in the right place. */
2347 /* We can't relax this new opcode. */
2350 move_reloc (irel, srel, -1);
2352 SNIP (2, 1, newrel);
2357 /* These are either matched with a DSP6 (2-byte base) or an id24 (3-byte base). */
2359 if (irel->r_addend & RX_RELAXA_IMM12)
2361 int dspcode, offset = 0;
2366 if ((insn[0] & 0xfc) == 0xfc)
2367 dspcode = 1; /* Just something with one byte operand. */
2369 dspcode = insn[0] & 3;
2372 case 0: offset = 2; break;
2373 case 1: offset = 3; break;
2374 case 2: offset = 4; break;
2375 case 3: offset = 2; break;
2378 /* These relocations sign-extend, so we must do signed compares. */
2379 ssymval = (long) symval;
2381 code = (insn[1] >> 2) & 3;
2382 if (code == 0 && ssymval <= 8388607 && ssymval >= -8388608)
2384 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2388 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2389 if (newrel != ELF32_R_TYPE (srel->r_info))
2391 SNIP (offset, 1, newrel);
2396 else if (code == 3 && ssymval <= 32767 && ssymval >= -32768)
2398 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2402 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2403 if (newrel != ELF32_R_TYPE (srel->r_info))
2405 SNIP (offset, 1, newrel);
2410 /* Special case UIMM8 format: MOV #uimm8,Rdst. */
2411 else if (code == 2 && ssymval <= 255 && ssymval >= 16
2412 /* Decodable bits. */
2414 /* Decodable bits. */
2415 && ((insn[1] & 0x03) == 0x02))
2420 insn[1] = 0x40 | (insn[1] >> 4);
2422 /* We can't relax this new opcode. */
2425 if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
2426 newrel = R_RX_ABS8U;
2428 newrel = R_RX_DIR8U;
2430 SNIP (2, 1, newrel);
2434 else if (code == 2 && ssymval <= 127 && ssymval >= -128)
2436 unsigned int newrel = ELF32_R_TYPE(srel->r_info);
2440 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2441 if (newrel != ELF32_R_TYPE(srel->r_info))
2443 SNIP (offset, 1, newrel);
2448 /* Special case UIMM4 format: MOV #uimm4,Rdst. */
2449 else if (code == 1 && ssymval <= 15 && ssymval >= 0
2450 /* Decodable bits. */
2452 /* Decodable bits. */
2453 && ((insn[1] & 0x03) == 0x02))
2456 insn[1] = insn[1] >> 4;
2458 /* We can't relax this new opcode. */
2461 move_reloc (irel, srel, -1);
2463 SNIP (2, 1, R_RX_RH_UIMM4p8);
2468 if (irel->r_addend & RX_RELAXA_BRA)
2470 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2472 int alignment_glue = 0;
2476 /* Branches over alignment chunks are problematic, as
2477 deleting bytes here makes the branch *further* away. We
2478 can be aggressive with branches within this alignment
2479 block, but not branches outside it. */
2480 if ((prev_alignment == NULL
2481 || symval < (bfd_vma)(sec_start + prev_alignment->r_offset))
2482 && (next_alignment == NULL
2483 || symval > (bfd_vma)(sec_start + next_alignment->r_offset)))
2484 alignment_glue = section_alignment_glue;
2486 if (ELF32_R_TYPE(srel[1].r_info) == R_RX_RH_RELAX
2487 && srel[1].r_addend & RX_RELAXA_BRA
2488 && srel[1].r_offset < irel->r_offset + pcrel)
2491 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2493 /* The values we compare PCREL with are not what you'd
2494 expect; they're off by a little to compensate for (1)
2495 where the reloc is relative to the insn, and (2) how much
2496 the insn is going to change when we relax it. */
2498 /* These we have to decode. */
2501 case 0x04: /* BRA pcdsp:24 */
2502 if (-32768 + alignment_glue <= pcrel
2503 && pcrel <= 32765 - alignment_glue)
2506 SNIP (3, 1, newrel);
2511 case 0x38: /* BRA pcdsp:16 */
2512 if (-128 + alignment_glue <= pcrel
2513 && pcrel <= 127 - alignment_glue)
2516 SNIP (2, 1, newrel);
2521 case 0x2e: /* BRA pcdsp:8 */
2522 /* Note that there's a risk here of shortening things so
2523 much that we no longer fit this reloc; it *should*
2524 only happen when you branch across a branch, and that
2525 branch also devolves into BRA.S. "Real" code should be fine. */
2527 if (max_pcrel3 + alignment_glue <= pcrel
2528 && pcrel <= 10 - alignment_glue
2532 SNIP (1, 1, newrel);
2533 move_reloc (irel, srel, -1);
2538 case 0x05: /* BSR pcdsp:24 */
2539 if (-32768 + alignment_glue <= pcrel
2540 && pcrel <= 32765 - alignment_glue)
2543 SNIP (1, 1, newrel);
2548 case 0x3a: /* BEQ.W pcdsp:16 */
2549 case 0x3b: /* BNE.W pcdsp:16 */
2550 if (-128 + alignment_glue <= pcrel
2551 && pcrel <= 127 - alignment_glue)
2553 insn[0] = 0x20 | (insn[0] & 1);
2554 SNIP (1, 1, newrel);
2559 case 0x20: /* BEQ.B pcdsp:8 */
2560 case 0x21: /* BNE.B pcdsp:8 */
2561 if (max_pcrel3 + alignment_glue <= pcrel
2562 && pcrel - alignment_glue <= 10
2565 insn[0] = 0x10 | ((insn[0] & 1) << 3);
2566 SNIP (1, 1, newrel);
2567 move_reloc (irel, srel, -1);
2572 case 0x16: /* synthetic BNE dsp24 */
2573 case 0x1e: /* synthetic BEQ dsp24 */
2574 if (-32767 + alignment_glue <= pcrel
2575 && pcrel <= 32766 - alignment_glue
2578 if (insn[0] == 0x16)
2582 /* We snip out the bytes at the end else the reloc
2583 will get moved too, and too much. */
2584 SNIP (3, 2, newrel);
2585 move_reloc (irel, srel, -1);
2591 /* Special case - synthetic conditional branches, pcrel24.
2592 Note that EQ and NE have been handled above. */
2593 if ((insn[0] & 0xf0) == 0x20
2596 && srel->r_offset != irel->r_offset + 1
2597 && -32767 + alignment_glue <= pcrel
2598 && pcrel <= 32766 - alignment_glue)
2602 SNIP (5, 1, newrel);
2606 /* Special case - synthetic conditional branches, pcrel16 */
2607 if ((insn[0] & 0xf0) == 0x20
2610 && srel->r_offset != irel->r_offset + 1
2611 && -127 + alignment_glue <= pcrel
2612 && pcrel <= 126 - alignment_glue)
2614 int cond = (insn[0] & 0x0f) ^ 0x01;
2616 insn[0] = 0x20 | cond;
2617 /* By moving the reloc first, we avoid having
2618 delete_bytes move it also. */
2619 move_reloc (irel, srel, -2);
2620 SNIP (2, 3, newrel);
2625 BFD_ASSERT (nrelocs == 0);
2627 /* Special case - check MOV.bwl #IMM, dsp[reg] and see if we can
2628 use MOV.bwl #uimm:8, dsp:5[r7] format. This is tricky
2629 because it may have one or two relocations. */
2630 if ((insn[0] & 0xfc) == 0xf8
2631 && (insn[1] & 0x80) == 0x00
2632 && (insn[0] & 0x03) != 0x03)
2634 int dcode, icode, reg, ioff, dscale, ilen;
2635 bfd_vma disp_val = 0;
2637 Elf_Internal_Rela * disp_rel = 0;
2638 Elf_Internal_Rela * imm_rel = 0;
2643 dcode = insn[0] & 0x03;
2644 icode = (insn[1] >> 2) & 0x03;
2645 reg = (insn[1] >> 4) & 0x0f;
2647 ioff = dcode == 1 ? 3 : dcode == 2 ? 4 : 2;
2649 /* Figure out what the displacement is. */
2650 if (dcode == 1 || dcode == 2)
2652 /* There's a displacement. See if there's a reloc for it. */
2653 if (srel[1].r_offset == irel->r_offset + 2)
2665 #if RX_OPCODE_BIG_ENDIAN
2666 disp_val = insn[2] * 256 + insn[3];
2668 disp_val = insn[2] + insn[3] * 256;
2671 switch (insn[1] & 3)
2687 /* Figure out what the immediate is. */
2688 if (srel[1].r_offset == irel->r_offset + ioff)
2691 imm_val = (long) symval;
2696 unsigned char * ip = insn + ioff;
2701 /* For byte writes, we don't sign extend. Makes the math easier later. */
2705 imm_val = (char) ip[0];
2708 #if RX_OPCODE_BIG_ENDIAN
2709 imm_val = ((char) ip[0] << 8) | ip[1];
2711 imm_val = ((char) ip[1] << 8) | ip[0];
2715 #if RX_OPCODE_BIG_ENDIAN
2716 imm_val = ((char) ip[0] << 16) | (ip[1] << 8) | ip[2];
2718 imm_val = ((char) ip[2] << 16) | (ip[1] << 8) | ip[0];
2722 #if RX_OPCODE_BIG_ENDIAN
2723 imm_val = (ip[0] << 24) | (ip[1] << 16) | (ip[2] << 8) | ip[3];
2725 imm_val = (ip[3] << 24) | (ip[2] << 16) | (ip[1] << 8) | ip[0];
2759 /* The shortcut happens when the immediate is 0..255,
2760 register r0 to r7, and displacement (scaled) 0..31. */
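/* E.g., assuming the conditions above hold, MOV.B #42, 4[R3] can be
   rewritten into the short dsp:5 form: insn[0] becomes 0x3c plus the
   size code, insn[1] packs the scaled displacement and the register,
   the immediate is narrowed to one byte (R_RX_DIR8U), and the leftover
   bytes are snipped.  (Hypothetical operands, for illustration.)  */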
2762 if (0 <= imm_val && imm_val <= 255
2763 && 0 <= reg && reg <= 7
2764 && disp_val / dscale <= 31)
2766 insn[0] = 0x3c | (insn[1] & 0x03);
2767 insn[1] = (((disp_val / dscale) << 3) & 0x80) | (reg << 4) | ((disp_val/dscale) & 0x0f);
2772 int newrel = R_RX_NONE;
2777 newrel = R_RX_RH_ABS5p8B;
2780 newrel = R_RX_RH_ABS5p8W;
2783 newrel = R_RX_RH_ABS5p8L;
2786 disp_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (disp_rel->r_info), newrel);
2787 move_reloc (irel, disp_rel, -1);
2791 imm_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (imm_rel->r_info), R_RX_DIR8U);
2792 move_reloc (disp_rel ? disp_rel : irel,
2794 irel->r_offset - imm_rel->r_offset + 2);
2797 SNIPNR (3, ilen - 3);
2800 /* We can't relax this new opcode. */
2806 /* We can't reliably relax branches to DIR3U_PCREL unless we know
2807 whatever they're branching over won't shrink any more. If we're
2808 basically done here, do one more pass just for branches - but
2809 don't request a pass after that one! */
2810 if (!*again && !allow_pcrel3)
2812 bfd_boolean ignored;
2814 elf32_rx_relax_section (abfd, sec, link_info, &ignored, TRUE);
2820 if (free_relocs != NULL)
2823 if (free_contents != NULL)
2824 free (free_contents);
2826 if (shndx_buf != NULL)
2828 shndx_hdr->contents = NULL;
2832 if (free_intsyms != NULL)
2833 free (free_intsyms);
2839 elf32_rx_relax_section_wrapper (bfd * abfd,
2841 struct bfd_link_info * link_info,
2842 bfd_boolean * again)
2844 return elf32_rx_relax_section (abfd, sec, link_info, again, FALSE);
2847 /* Function to set the ELF flag bits. */
2850 rx_elf_set_private_flags (bfd * abfd, flagword flags)
2852 elf_elfheader (abfd)->e_flags = flags;
2853 elf_flags_init (abfd) = TRUE;
2857 static bfd_boolean no_warn_mismatch = FALSE;
2859 void bfd_elf32_rx_set_target_flags (bfd_boolean);
2862 bfd_elf32_rx_set_target_flags (bfd_boolean user_no_warn_mismatch)
2864 no_warn_mismatch = user_no_warn_mismatch;
2867 /* Merge backend specific data from an object file to the output
2868 object file when linking. */
2871 rx_elf_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
2875 bfd_boolean error = FALSE;
2877 new_flags = elf_elfheader (ibfd)->e_flags;
2878 old_flags = elf_elfheader (obfd)->e_flags;
2880 if (!elf_flags_init (obfd))
2882 /* First call, no flags set. */
2883 elf_flags_init (obfd) = TRUE;
2884 elf_elfheader (obfd)->e_flags = new_flags;
2886 else if (old_flags != new_flags)
2888 flagword known_flags = E_FLAG_RX_64BIT_DOUBLES | E_FLAG_RX_DSP;
2890 if ((old_flags ^ new_flags) & known_flags)
2892 /* Only complain if flag bits we care about do not match.
2893 Other bits may be set, since older binaries did use some
2894 deprecated flags. */
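/* If the known bits do differ, either quietly take the union of the known
   flag bits for the output (when mismatch warnings are suppressed), or
   report both flag words and let the merge fail with bfd_error_bad_value.  */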
2895 if (no_warn_mismatch)
2897 elf_elfheader (obfd)->e_flags = (new_flags | old_flags) & known_flags;
2901 (*_bfd_error_handler)
2902 ("ELF header flags mismatch: old_flags = 0x%.8lx, new_flags = 0x%.8lx, filename = %s",
2903 old_flags, new_flags, bfd_get_filename (ibfd));
2908 elf_elfheader (obfd)->e_flags = new_flags & known_flags;
2912 bfd_set_error (bfd_error_bad_value);
2918 rx_elf_print_private_bfd_data (bfd * abfd, void * ptr)
2920 FILE * file = (FILE *) ptr;
2923 BFD_ASSERT (abfd != NULL && ptr != NULL);
2925 /* Print normal ELF private data. */
2926 _bfd_elf_print_private_bfd_data (abfd, ptr);
2928 flags = elf_elfheader (abfd)->e_flags;
2929 fprintf (file, _("private flags = 0x%lx:"), (long) flags);
2931 if (flags & E_FLAG_RX_64BIT_DOUBLES)
2932 fprintf (file, _(" [64-bit doubles]"));
2933 if (flags & E_FLAG_RX_DSP)
2934 fprintf (file, _(" [dsp]"));
2940 /* Return the MACH for an e_flags value. */
2943 elf32_rx_machine (bfd * abfd)
2945 if ((elf_elfheader (abfd)->e_flags & EF_RX_CPU_MASK) == EF_RX_CPU_RX)
2952 rx_elf_object_p (bfd * abfd)
2954 bfd_default_set_arch_mach (abfd, bfd_arch_rx,
2955 elf32_rx_machine (abfd));
2962 rx_dump_symtab (bfd * abfd, void * internal_syms, void * external_syms)
2965 Elf_Internal_Sym * isymbuf;
2966 Elf_Internal_Sym * isymend;
2967 Elf_Internal_Sym * isym;
2968 Elf_Internal_Shdr * symtab_hdr;
2969 bfd_boolean free_internal = FALSE, free_external = FALSE;
2971 char * st_info_stb_str;
2972 char * st_other_str;
2973 char * st_shndx_str;
2975 if (! internal_syms)
2977 internal_syms = bfd_malloc (1000);
2980 if (! external_syms)
2982 external_syms = bfd_malloc (1000);
2986 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2987 locsymcount = symtab_hdr->sh_size / get_elf_backend_data (abfd)->s->sizeof_sym;
2989 isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
2990 symtab_hdr->sh_info, 0,
2991 internal_syms, external_syms, NULL);
2993 isymbuf = internal_syms;
2994 isymend = isymbuf + locsymcount;
2996 for (isym = isymbuf ; isym < isymend ; isym++)
2998 switch (ELF_ST_TYPE (isym->st_info))
3000 case STT_FUNC: st_info_str = "STT_FUNC"; break;
3001 case STT_SECTION: st_info_str = "STT_SECTION"; break;
3002 case STT_FILE: st_info_str = "STT_FILE"; break;
3003 case STT_OBJECT: st_info_str = "STT_OBJECT"; break;
3004 case STT_TLS: st_info_str = "STT_TLS"; break;
3005 default: st_info_str = ""; break;
3007 switch (ELF_ST_BIND (isym->st_info))
3009 case STB_LOCAL: st_info_stb_str = "STB_LOCAL"; break;
3010 case STB_GLOBAL: st_info_stb_str = "STB_GLOBAL"; break;
3011 default: st_info_stb_str = ""; break;
3013 switch (ELF_ST_VISIBILITY (isym->st_other))
3015 case STV_DEFAULT: st_other_str = "STV_DEFAULT"; break;
3016 case STV_INTERNAL: st_other_str = "STV_INTERNAL"; break;
3017 case STV_PROTECTED: st_other_str = "STV_PROTECTED"; break;
3018 default: st_other_str = ""; break;
3020 switch (isym->st_shndx)
3022 case SHN_ABS: st_shndx_str = "SHN_ABS"; break;
3023 case SHN_COMMON: st_shndx_str = "SHN_COMMON"; break;
3024 case SHN_UNDEF: st_shndx_str = "SHN_UNDEF"; break;
3025 default: st_shndx_str = ""; break;
3028 printf ("isym = %p st_value = %lx st_size = %lx st_name = (%lu) %s "
3029 "st_info = (%d) %s %s st_other = (%d) %s st_shndx = (%d) %s\n",
3031 (unsigned long) isym->st_value,
3032 (unsigned long) isym->st_size,
3034 bfd_elf_string_from_elf_section (abfd, symtab_hdr->sh_link,
3036 isym->st_info, st_info_str, st_info_stb_str,
3037 isym->st_other, st_other_str,
3038 isym->st_shndx, st_shndx_str);
3041 free (internal_syms);
3043 free (external_syms);
3047 rx_get_reloc (long reloc)
3049 if (0 <= reloc && reloc < R_RX_max)
3050 return rx_elf_howto_table[reloc].name;
3056 /* We must take care to keep the on-disk copy of any fully linked
3057 code sections swapped if the target is big endian, to match the
3058 Renesas tools. */
3060 /* The rule is: big-endian objects that are final-link executables
3061 have their code sections stored with 32-bit words swapped relative
3062 to what you'd get by default. */
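/* Concretely, the four bytes of each aligned 32-bit code word appear
   byte-reversed on disk: memory order 00 01 02 03 is stored as 03 02 01 00.
   A minimal sketch of that rule (illustrative only, assuming a buffer whose
   length is a multiple of four; the real accessors below also handle the
   unaligned head and tail bytes):  */
#if 0
static void
rx_swap_code_words_example (bfd_byte * buf, bfd_size_type len)
{
  bfd_size_type i;

  /* Reverse each aligned 32-bit word in place: read it as little-endian,
     write it back as big-endian.  */
  for (i = 0; i + 4 <= len; i += 4)
    bfd_putb32 (bfd_getl32 (buf + i), buf + i);
}
#endif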
3065 rx_get_section_contents (bfd * abfd,
3069 bfd_size_type count)
3071 int exec = (abfd->flags & EXEC_P) ? 1 : 0;
3072 int s_code = (section->flags & SEC_CODE) ? 1 : 0;
3076 fprintf (stderr, "dj: get %ld %ld from %s %s e%d sc%d %08lx:%08lx\n",
3077 (long) offset, (long) count, section->name,
3078 bfd_big_endian(abfd) ? "be" : "le",
3079 exec, s_code, (long unsigned) section->filepos,
3080 (long unsigned) offset);
3083 if (exec && s_code && bfd_big_endian (abfd))
3085 char * cloc = (char *) location;
3086 bfd_size_type cnt, end_cnt;
3090 /* Fetch and swap unaligned bytes at the beginning. */
3095 rv = _bfd_generic_get_section_contents (abfd, section, buf,
3100 bfd_putb32 (bfd_getl32 (buf), buf);
3102 cnt = 4 - (offset % 4);
3106 memcpy (location, buf + (offset % 4), cnt);
3113 end_cnt = count % 4;
3115 /* Fetch and swap the middle bytes. */
3118 rv = _bfd_generic_get_section_contents (abfd, section, cloc, offset,
3123 for (cnt = count; cnt >= 4; cnt -= 4, cloc += 4)
3124 bfd_putb32 (bfd_getl32 (cloc), cloc);
3127 /* Fetch and swap the end bytes. */
3132 /* Fetch the end bytes. */
3133 rv = _bfd_generic_get_section_contents (abfd, section, buf,
3134 offset + count - end_cnt, 4);
3138 bfd_putb32 (bfd_getl32 (buf), buf);
3139 memcpy (cloc, buf, end_cnt);
3143 rv = _bfd_generic_get_section_contents (abfd, section, location, offset, count);
3150 rx2_set_section_contents (bfd * abfd,
3152 const void * location,
3154 bfd_size_type count)
3158 fprintf (stderr, " set sec %s %08x loc %p offset %#x count %#x\n",
3159 section->name, (unsigned) section->vma, location, (int) offset, (int) count);
3160 for (i = 0; i < count; i++)
3162 if (i % 16 == 0 && i > 0)
3163 fprintf (stderr, "\n");
3165 if (i % 16 && i % 4 == 0)
3166 fprintf (stderr, " ");
3169 fprintf (stderr, " %08x:", (int) (section->vma + offset + i));
3171 fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
3173 fprintf (stderr, "\n");
3175 return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
3177 #define _bfd_elf_set_section_contents rx2_set_section_contents
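/* With the override above in effect, calls to _bfd_elf_set_section_contents
   later in this file resolve to the dumping wrapper; the wrapper itself
   appears before the override, so its own call still reaches the real
   implementation.  */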
3181 rx_set_section_contents (bfd * abfd,
3183 const void * location,
3185 bfd_size_type count)
3187 bfd_boolean exec = (abfd->flags & EXEC_P) ? TRUE : FALSE;
3188 bfd_boolean s_code = (section->flags & SEC_CODE) ? TRUE : FALSE;
3190 char * swapped_data = NULL;
3192 bfd_vma caddr = section->vma + offset;
3194 bfd_size_type scount;
3199 fprintf (stderr, "\ndj: set %ld %ld to %s %s e%d sc%d\n",
3200 (long) offset, (long) count, section->name,
3201 bfd_big_endian (abfd) ? "be" : "le",
3204 for (i = 0; i < count; i++)
3206 int a = section->vma + offset + i;
3208 if (a % 16 == 0 && a > 0)
3209 fprintf (stderr, "\n");
3211 if (a % 16 && a % 4 == 0)
3212 fprintf (stderr, " ");
3214 if (a % 16 == 0 || i == 0)
3215 fprintf (stderr, " %08x:", (int) (section->vma + offset + i));
3217 fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
3220 fprintf (stderr, "\n");
3223 if (! exec || ! s_code || ! bfd_big_endian (abfd))
3224 return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
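/* The contents need swapping.  Within each aligned 32-bit word, the byte at
   position k (k = caddr % 4) is stored at file offset offset + 3 - 2 * k,
   i.e. the word is byte-reversed on disk.  First write any leading bytes
   that precede a four-byte boundary individually, at their swapped spots.  */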
3226 while (count > 0 && caddr > 0 && caddr % 4)
3230 case 0: faddr = offset + 3; break;
3231 case 1: faddr = offset + 1; break;
3232 case 2: faddr = offset - 1; break;
3233 case 3: faddr = offset - 3; break;
3236 rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
3246 scount = (int)(count / 4) * 4;
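/* Now the aligned middle: copy it into a scratch buffer with every 32-bit
   word byte-reversed and write that out in a single call.  */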
3249 char * cloc = (char *) location;
3251 swapped_data = (char *) bfd_alloc (abfd, count);
3253 for (i = 0; i < count; i += 4)
3255 bfd_vma v = bfd_getl32 (cloc + i);
3256 bfd_putb32 (v, swapped_data + i);
3259 rv = _bfd_elf_set_section_contents (abfd, section, swapped_data, offset, scount);
3271 caddr = section->vma + offset;
3276 case 0: faddr = offset + 3; break;
3277 case 1: faddr = offset + 1; break;
3278 case 2: faddr = offset - 1; break;
3279 case 3: faddr = offset - 3; break;
3281 rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
3296 rx_final_link (bfd * abfd, struct bfd_link_info * info)
3300 for (o = abfd->sections; o != NULL; o = o->next)
3303 fprintf (stderr, "sec %s fl %x vma %lx lma %lx size %lx raw %lx\n",
3304 o->name, o->flags, o->vma, o->lma, o->size, o->rawsize);
3306 if (o->flags & SEC_CODE
3307 && bfd_big_endian (abfd)
3311 fprintf (stderr, "adjusting...\n");
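/* Pad the section size up to a multiple of four so that the word swapping
   applied when the contents are written out always covers whole words.  */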
3313 o->size += 4 - (o->size % 4);
3317 return bfd_elf_final_link (abfd, info);
3321 elf32_rx_modify_program_headers (bfd * abfd ATTRIBUTE_UNUSED,
3322 struct bfd_link_info * info ATTRIBUTE_UNUSED)
3324 const struct elf_backend_data * bed;
3325 struct elf_obj_tdata * tdata;
3326 Elf_Internal_Phdr * phdr;
3330 bed = get_elf_backend_data (abfd);
3331 tdata = elf_tdata (abfd);
3333 count = tdata->program_header_size / bed->s->sizeof_phdr;
3335 for (i = count; i-- != 0; )
3336 if (phdr[i].p_type == PT_LOAD)
3338 /* The Renesas tools expect p_paddr to be zero. However,
3339 there is no other way to store the writable data in ROM for
3340 startup initialization. So, we let the linker *think*
3341 we're using paddr and vaddr the "usual" way, but at the
3342 last minute we move the paddr into the vaddr (which is what
3343 the simulator uses) and zero out paddr. Note that this
3344 does not affect the section headers, just the program
3345 headers. We hope. */
3346 phdr[i].p_vaddr = phdr[i].p_paddr;
3347 /* If we zero out p_paddr, then the LMA in the section table
3348 becomes wrong, so the zeroing below is left disabled. */
3349 /*phdr[i].p_paddr = 0;*/
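/* So, for example, a PT_LOAD segment built with VMA 0x1000 (RAM) and
   LMA 0xffff8000 (ROM) is emitted with both p_vaddr and p_paddr equal to
   0xffff8000 (illustrative addresses only).  */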
3355 #define ELF_ARCH bfd_arch_rx
3356 #define ELF_MACHINE_CODE EM_RX
3357 #define ELF_MAXPAGESIZE 0x1000
3359 #define TARGET_BIG_SYM bfd_elf32_rx_be_vec
3360 #define TARGET_BIG_NAME "elf32-rx-be"
3362 #define TARGET_LITTLE_SYM bfd_elf32_rx_le_vec
3363 #define TARGET_LITTLE_NAME "elf32-rx-le"
3365 #define elf_info_to_howto_rel NULL
3366 #define elf_info_to_howto rx_info_to_howto_rela
3367 #define elf_backend_object_p rx_elf_object_p
3368 #define elf_backend_relocate_section rx_elf_relocate_section
3369 #define elf_symbol_leading_char ('_')
3370 #define elf_backend_can_gc_sections 1
3371 #define elf_backend_modify_program_headers elf32_rx_modify_program_headers
3373 #define bfd_elf32_bfd_reloc_type_lookup rx_reloc_type_lookup
3374 #define bfd_elf32_bfd_reloc_name_lookup rx_reloc_name_lookup
3375 #define bfd_elf32_bfd_set_private_flags rx_elf_set_private_flags
3376 #define bfd_elf32_bfd_merge_private_bfd_data rx_elf_merge_private_bfd_data
3377 #define bfd_elf32_bfd_print_private_bfd_data rx_elf_print_private_bfd_data
3378 #define bfd_elf32_get_section_contents rx_get_section_contents
3379 #define bfd_elf32_set_section_contents rx_set_section_contents
3380 #define bfd_elf32_bfd_final_link rx_final_link
3381 #define bfd_elf32_bfd_relax_section elf32_rx_relax_section_wrapper
3383 #include "elf32-target.h"