1 /* BFD back-end for Hitachi Super-H COFF binaries.
2 Copyright 1993, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
3 Contributed by Cygnus Support.
4 Written by Steve Chamberlain, <sac@cygnus.com>.
5 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
7 This file is part of BFD, the Binary File Descriptor library.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
28 #include "coff/internal.h"
31 /* Internal functions. */
32 static bfd_reloc_status_type sh_reloc
33 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
34 static long get_symbol_value PARAMS ((asymbol *));
35 static boolean sh_relax_section
36 PARAMS ((bfd *, asection *, struct bfd_link_info *, boolean *));
37 static boolean sh_relax_delete_bytes
38 PARAMS ((bfd *, asection *, bfd_vma, int));
39 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
40 static boolean sh_align_loads
41 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *, boolean *));
42 static boolean sh_swap_insns
43 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
44 static boolean sh_relocate_section
45 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
46 struct internal_reloc *, struct internal_syment *, asection **));
47 static bfd_byte *sh_coff_get_relocated_section_contents
48 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
49 bfd_byte *, boolean, asymbol **));
51 /* Default section alignment to 2**4. */
52 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER (4)
54 /* Generate long file names. */
55 #define COFF_LONG_FILENAMES
57 /* The supported relocations. There are a lot of relocations defined
58 in coff/internal.h which we do not expect to ever see. */
59 static reloc_howto_type sh_coff_howtos[] =
64 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
65 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
66 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
67 EMPTY_HOWTO (6), /* R_SH_IMM24 */
68 EMPTY_HOWTO (7), /* R_SH_LOW16 */
70 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
72 HOWTO (R_SH_PCDISP8BY2, /* type */
74 1, /* size (0 = byte, 1 = short, 2 = long) */
76 true, /* pc_relative */
78 complain_overflow_signed, /* complain_on_overflow */
79 sh_reloc, /* special_function */
80 "r_pcdisp8by2", /* name */
81 true, /* partial_inplace */
84 true), /* pcrel_offset */
86 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
88 HOWTO (R_SH_PCDISP, /* type */
90 1, /* size (0 = byte, 1 = short, 2 = long) */
92 true, /* pc_relative */
94 complain_overflow_signed, /* complain_on_overflow */
95 sh_reloc, /* special_function */
96 "r_pcdisp12by2", /* name */
97 true, /* partial_inplace */
100 true), /* pcrel_offset */
104 HOWTO (R_SH_IMM32, /* type */
106 2, /* size (0 = byte, 1 = short, 2 = long) */
108 false, /* pc_relative */
110 complain_overflow_bitfield, /* complain_on_overflow */
111 sh_reloc, /* special_function */
112 "r_imm32", /* name */
113 true, /* partial_inplace */
114 0xffffffff, /* src_mask */
115 0xffffffff, /* dst_mask */
116 false), /* pcrel_offset */
119 EMPTY_HOWTO (16), /* R_SH_IMM8 */
120 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
121 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
122 EMPTY_HOWTO (19), /* R_SH_IMM4 */
123 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
124 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
126 HOWTO (R_SH_PCRELIMM8BY2, /* type */
128 1, /* size (0 = byte, 1 = short, 2 = long) */
130 true, /* pc_relative */
132 complain_overflow_unsigned, /* complain_on_overflow */
133 sh_reloc, /* special_function */
134 "r_pcrelimm8by2", /* name */
135 true, /* partial_inplace */
138 true), /* pcrel_offset */
140 HOWTO (R_SH_PCRELIMM8BY4, /* type */
142 1, /* size (0 = byte, 1 = short, 2 = long) */
144 true, /* pc_relative */
146 complain_overflow_unsigned, /* complain_on_overflow */
147 sh_reloc, /* special_function */
148 "r_pcrelimm8by4", /* name */
149 true, /* partial_inplace */
152 true), /* pcrel_offset */
154 HOWTO (R_SH_IMM16, /* type */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
158 false, /* pc_relative */
160 complain_overflow_bitfield, /* complain_on_overflow */
161 sh_reloc, /* special_function */
162 "r_imm16", /* name */
163 true, /* partial_inplace */
164 0xffff, /* src_mask */
165 0xffff, /* dst_mask */
166 false), /* pcrel_offset */
168 HOWTO (R_SH_SWITCH16, /* type */
170 1, /* size (0 = byte, 1 = short, 2 = long) */
172 false, /* pc_relative */
174 complain_overflow_bitfield, /* complain_on_overflow */
175 sh_reloc, /* special_function */
176 "r_switch16", /* name */
177 true, /* partial_inplace */
178 0xffff, /* src_mask */
179 0xffff, /* dst_mask */
180 false), /* pcrel_offset */
182 HOWTO (R_SH_SWITCH32, /* type */
184 2, /* size (0 = byte, 1 = short, 2 = long) */
186 false, /* pc_relative */
188 complain_overflow_bitfield, /* complain_on_overflow */
189 sh_reloc, /* special_function */
190 "r_switch32", /* name */
191 true, /* partial_inplace */
192 0xffffffff, /* src_mask */
193 0xffffffff, /* dst_mask */
194 false), /* pcrel_offset */
196 HOWTO (R_SH_USES, /* type */
198 1, /* size (0 = byte, 1 = short, 2 = long) */
200 false, /* pc_relative */
202 complain_overflow_bitfield, /* complain_on_overflow */
203 sh_reloc, /* special_function */
204 "r_uses", /* name */
205 true, /* partial_inplace */
206 0xffff, /* src_mask */
207 0xffff, /* dst_mask */
208 false), /* pcrel_offset */
210 HOWTO (R_SH_COUNT, /* type */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
214 false, /* pc_relative */
216 complain_overflow_bitfield, /* complain_on_overflow */
217 sh_reloc, /* special_function */
218 "r_count", /* name */
219 true, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 false), /* pcrel_offset */
224 HOWTO (R_SH_ALIGN, /* type */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
228 false, /* pc_relative */
230 complain_overflow_bitfield, /* complain_on_overflow */
231 sh_reloc, /* special_function */
232 "r_align", /* name */
233 true, /* partial_inplace */
234 0xffffffff, /* src_mask */
235 0xffffffff, /* dst_mask */
236 false), /* pcrel_offset */
238 HOWTO (R_SH_CODE, /* type */
240 2, /* size (0 = byte, 1 = short, 2 = long) */
242 false, /* pc_relative */
244 complain_overflow_bitfield, /* complain_on_overflow */
245 sh_reloc, /* special_function */
246 "r_code", /* name */
247 true, /* partial_inplace */
248 0xffffffff, /* src_mask */
249 0xffffffff, /* dst_mask */
250 false), /* pcrel_offset */
252 HOWTO (R_SH_DATA, /* type */
254 2, /* size (0 = byte, 1 = short, 2 = long) */
256 false, /* pc_relative */
258 complain_overflow_bitfield, /* complain_on_overflow */
259 sh_reloc, /* special_function */
260 "r_data", /* name */
261 true, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 false), /* pcrel_offset */
266 HOWTO (R_SH_LABEL, /* type */
268 2, /* size (0 = byte, 1 = short, 2 = long) */
270 false, /* pc_relative */
272 complain_overflow_bitfield, /* complain_on_overflow */
273 sh_reloc, /* special_function */
274 "r_label", /* name */
275 true, /* partial_inplace */
276 0xffffffff, /* src_mask */
277 0xffffffff, /* dst_mask */
278 false), /* pcrel_offset */
280 HOWTO (R_SH_SWITCH8, /* type */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
284 false, /* pc_relative */
286 complain_overflow_bitfield, /* complain_on_overflow */
287 sh_reloc, /* special_function */
288 "r_switch8", /* name */
289 true, /* partial_inplace */
292 false) /* pcrel_offset */
295 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
297 /* Check for a bad magic number. */
298 #define BADMAG(x) SHBADMAG(x)
300 /* Customize coffcode.h (this is not currently used). */
303 /* FIXME: This should not be set here. */
304 #define __A_MAGIC_SET__
306 /* Swap the r_offset field in and out. */
307 #define SWAP_IN_RELOC_OFFSET bfd_h_get_32
308 #define SWAP_OUT_RELOC_OFFSET bfd_h_put_32
310 /* Swap out extra information in the reloc structure. */
311 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
314 dst->r_stuff[0] = 'S'; \
315 dst->r_stuff[1] = 'C'; \
319 /* Get the value of a symbol, when performing a relocation. */
322 get_symbol_value (symbol)
327 if (bfd_is_com_section (symbol->section))
330 relocation = (symbol->value +
331 symbol->section->output_section->vma +
332 symbol->section->output_offset);
337 /* This macro is used in coffcode.h to get the howto corresponding to
338 an internal reloc. */
340 #define RTYPE2HOWTO(relent, internal) \
341 ((relent)->howto = \
342 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
343 ? &sh_coff_howtos[(internal)->r_type] \
344 : (reloc_howto_type *) NULL))
346 /* This is the same as the macro in coffcode.h, except that it copies
347 r_offset into reloc_entry->addend for some relocs. */
348 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
350 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
351 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
352 coffsym = (obj_symbols (abfd) \
353 + (cache_ptr->sym_ptr_ptr - symbols)); \
354 else if (ptr) \
355 coffsym = coff_symbol_from (abfd, ptr); \
356 if (coffsym != (coff_symbol_type *) NULL \
357 && coffsym->native->u.syment.n_scnum == 0) \
358 cache_ptr->addend = 0; \
359 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
360 && ptr->section != (asection *) NULL) \
361 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
362 else \
363 cache_ptr->addend = 0; \
364 if ((reloc).r_type == R_SH_SWITCH8 \
365 || (reloc).r_type == R_SH_SWITCH16 \
366 || (reloc).r_type == R_SH_SWITCH32 \
367 || (reloc).r_type == R_SH_USES \
368 || (reloc).r_type == R_SH_COUNT \
369 || (reloc).r_type == R_SH_ALIGN) \
370 cache_ptr->addend = (reloc).r_offset; \
373 /* This is the howto function for the SH relocations. */
375 static bfd_reloc_status_type
376 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
379 arelent *reloc_entry;
382 asection *input_section;
384 char **error_message ATTRIBUTE_UNUSED;
388 unsigned short r_type;
389 bfd_vma addr = reloc_entry->address;
390 bfd_byte *hit_data = addr + (bfd_byte *) data;
392 r_type = reloc_entry->howto->type;
394 if (output_bfd != NULL)
396 /* Partial linking--do nothing. */
397 reloc_entry->address += input_section->output_offset;
401 /* Almost all relocs have to do with relaxing. If any work must be
402 done for them, it has been done in sh_relax_section. */
403 if (r_type != R_SH_IMM32
404 && (r_type != R_SH_PCDISP
405 || (symbol_in->flags & BSF_LOCAL) != 0))
408 if (symbol_in != NULL
409 && bfd_is_und_section (symbol_in->section))
410 return bfd_reloc_undefined;
412 sym_value = get_symbol_value (symbol_in);
417 insn = bfd_get_32 (abfd, hit_data);
418 insn += sym_value + reloc_entry->addend;
419 bfd_put_32 (abfd, insn, hit_data);
422 insn = bfd_get_16 (abfd, hit_data);
423 sym_value += reloc_entry->addend;
424 sym_value -= (input_section->output_section->vma
425 + input_section->output_offset
428 sym_value += (insn & 0xfff) << 1;
431 insn = (insn & 0xf000) | (sym_value & 0xfff);
432 bfd_put_16 (abfd, insn, hit_data);
433 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
434 return bfd_reloc_overflow;
444 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
446 /* We can do relaxing. */
447 #define coff_bfd_relax_section sh_relax_section
449 /* We use the special COFF backend linker. */
450 #define coff_relocate_section sh_relocate_section
452 /* When relaxing, we need to use special code to get the relocated
453 section contents. */
454 #define coff_bfd_get_relocated_section_contents \
455 sh_coff_get_relocated_section_contents
457 #include "coffcode.h"
459 /* This function handles relaxing on the SH.
461 Function calls on the SH look like this:
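   An illustrative pattern (the register and label names here are only
   an example):

       mov.l  L1,r0
       ...
       jsr    @r0
       ...
     L1:
       .long  function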
470 The compiler and assembler will cooperate to create R_SH_USES
471 relocs on the jsr instructions. The r_offset field of the
472 R_SH_USES reloc is the PC relative offset to the instruction which
473 loads the register (the r_offset field is computed as though it
474 were a jump instruction, so the offset value is actually from four
475 bytes past the instruction). The linker can use this reloc to
476 determine just which function is being called, and thus decide
477 whether it is possible to replace the jsr with a bsr.
479 If multiple function calls are all based on a single register load
480 (i.e., the same function is called multiple times), the compiler
481 guarantees that each function call will have an R_SH_USES reloc.
482 Therefore, if the linker is able to convert each R_SH_USES reloc
483 which refers to that address, it can safely eliminate the register
484 load.
486 When the assembler creates an R_SH_USES reloc, it examines it to
487 determine which address is being loaded (L1 in the above example).
488 It then counts the number of references to that address, and
489 creates an R_SH_COUNT reloc at that address. The r_offset field of
490 the R_SH_COUNT reloc will be the number of references. If the
491 linker is able to eliminate a register load, it can use the
492 R_SH_COUNT reloc to see whether it can also eliminate the function
493 address.
495 SH relaxing also handles another, unrelated, matter. On the SH, if
496 a load or store instruction is not aligned on a four byte boundary,
497 the memory cycle interferes with the 32 bit instruction fetch,
498 causing a one cycle bubble in the pipeline. Therefore, we try to
499 align load and store instructions on four byte boundaries if we
500 can, by swapping them with one of the adjacent instructions. */
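/* A minimal sketch of the alignment case (illustrative only; the
   instructions, registers and addresses are arbitrary):

       add    #1,r1
       mov.l  @r2,r3        <- load at a 4n+2 address

   If the two instructions do not conflict, swapping them puts the
   mov.l on a four byte boundary and avoids the instruction fetch
   interference described above.  */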
503 sh_relax_section (abfd, sec, link_info, again)
506 struct bfd_link_info *link_info;
509 struct internal_reloc *internal_relocs;
510 struct internal_reloc *free_relocs = NULL;
512 struct internal_reloc *irel, *irelend;
513 bfd_byte *contents = NULL;
514 bfd_byte *free_contents = NULL;
518 if (link_info->relocateable
519 || (sec->flags & SEC_RELOC) == 0
520 || sec->reloc_count == 0)
523 /* If this is the first time we have been called for this section,
524 initialize the cooked size. */
525 if (sec->_cooked_size == 0)
526 sec->_cooked_size = sec->_raw_size;
528 internal_relocs = (_bfd_coff_read_internal_relocs
529 (abfd, sec, link_info->keep_memory,
530 (bfd_byte *) NULL, false,
531 (struct internal_reloc *) NULL));
532 if (internal_relocs == NULL)
534 if (! link_info->keep_memory)
535 free_relocs = internal_relocs;
539 irelend = internal_relocs + sec->reloc_count;
540 for (irel = internal_relocs; irel < irelend; irel++)
542 bfd_vma laddr, paddr, symval;
544 struct internal_reloc *irelfn, *irelscan, *irelcount;
545 struct internal_syment sym;
548 if (irel->r_type == R_SH_CODE)
551 if (irel->r_type != R_SH_USES)
554 /* Get the section contents. */
555 if (contents == NULL)
557 if (coff_section_data (abfd, sec) != NULL
558 && coff_section_data (abfd, sec)->contents != NULL)
559 contents = coff_section_data (abfd, sec)->contents;
562 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
563 if (contents == NULL)
565 free_contents = contents;
567 if (! bfd_get_section_contents (abfd, sec, contents,
568 (file_ptr) 0, sec->_raw_size))
573 /* The r_offset field of the R_SH_USES reloc will point us to
574 the register load. The 4 is because the r_offset field is
575 computed as though it were a jump offset, and jump offsets are
576 measured from 4 bytes after the jump instruction. */
577 laddr = irel->r_vaddr - sec->vma + 4;
578 /* Careful to sign extend the 32-bit offset. */
579 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
580 if (laddr >= sec->_raw_size)
582 (*_bfd_error_handler) ("%s: 0x%lx: warning: bad R_SH_USES offset",
583 bfd_get_filename (abfd),
584 (unsigned long) irel->r_vaddr);
587 insn = bfd_get_16 (abfd, contents + laddr);
589 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
590 if ((insn & 0xf000) != 0xd000)
592 ((*_bfd_error_handler)
593 ("%s: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
594 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr, insn));
598 /* Get the address from which the register is being loaded. The
599 displacement in the mov.l instruction is quadrupled. It is a
600 displacement from four bytes after the movl instruction, but,
601 before adding in the PC address, the two least significant bits
602 of the PC are cleared. We assume that the section is aligned
603 on a four byte boundary. */
604 paddr = insn & 0xff;
605 paddr *= 4;
606 paddr += (laddr + 4) &~ 3;
607 if (paddr >= sec->_raw_size)
609 ((*_bfd_error_handler)
610 ("%s: 0x%lx: warning: bad R_SH_USES load offset",
611 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
615 /* Get the reloc for the address from which the register is
616 being loaded. This reloc will tell us which function is
617 actually being called. */
619 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
620 if (irelfn->r_vaddr == paddr
621 && irelfn->r_type == R_SH_IMM32)
623 if (irelfn >= irelend)
625 ((*_bfd_error_handler)
626 ("%s: 0x%lx: warning: could not find expected reloc",
627 bfd_get_filename (abfd), (unsigned long) paddr));
631 /* Get the value of the symbol referred to by the reloc. */
632 if (! _bfd_coff_get_external_symbols (abfd))
634 bfd_coff_swap_sym_in (abfd,
635 ((bfd_byte *) obj_coff_external_syms (abfd)
637 * bfd_coff_symesz (abfd))),
639 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
641 ((*_bfd_error_handler)
642 ("%s: 0x%lx: warning: symbol in unexpected section",
643 bfd_get_filename (abfd), (unsigned long) paddr));
647 if (sym.n_sclass != C_EXT)
649 symval = (sym.n_value
651 + sec->output_section->vma
652 + sec->output_offset);
656 struct coff_link_hash_entry *h;
658 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
659 BFD_ASSERT (h != NULL);
660 if (h->root.type != bfd_link_hash_defined
661 && h->root.type != bfd_link_hash_defweak)
663 /* This appears to be a reference to an undefined
664 symbol. Just ignore it--it will be caught by the
665 regular reloc processing. */
669 symval = (h->root.u.def.value
670 + h->root.u.def.section->output_section->vma
671 + h->root.u.def.section->output_offset);
674 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
676 /* See if this function call can be shortened. */
680 + sec->output_section->vma
683 if (foff < -0x1000 || foff >= 0x1000)
685 /* After all that work, we can't shorten this function call. */
689 /* Shorten the function call. */
691 /* For simplicity of coding, we are going to modify the section
692 contents, the section relocs, and the BFD symbol table. We
693 must tell the rest of the code not to free up this
694 information. It would be possible to instead create a table
695 of changes which have to be made, as is done in coff-mips.c;
696 that would be more work, but would require less memory when
697 the linker is run. */
699 if (coff_section_data (abfd, sec) == NULL)
702 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
703 if (sec->used_by_bfd == NULL)
707 coff_section_data (abfd, sec)->relocs = internal_relocs;
708 coff_section_data (abfd, sec)->keep_relocs = true;
711 coff_section_data (abfd, sec)->contents = contents;
712 coff_section_data (abfd, sec)->keep_contents = true;
713 free_contents = NULL;
715 obj_coff_keep_syms (abfd) = true;
717 /* Replace the jsr with a bsr. */
719 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
720 replace the jsr with a bsr. */
721 irel->r_type = R_SH_PCDISP;
722 irel->r_symndx = irelfn->r_symndx;
723 if (sym.n_sclass != C_EXT)
725 /* If this needs to be changed because of future relaxing,
726 it will be handled here like other internal PCDISP
727 relocs. */
728 bfd_put_16 (abfd,
729 0xb000 | ((foff >> 1) & 0xfff),
730 contents + irel->r_vaddr - sec->vma);
734 /* We can't fully resolve this yet, because the external
735 symbol value may be changed by future relaxing. We let
736 the final link phase handle it. */
737 bfd_put_16 (abfd, 0xb000, contents + irel->r_vaddr - sec->vma);
740 /* See if there is another R_SH_USES reloc referring to the same
741 register load. */
742 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
743 if (irelscan->r_type == R_SH_USES
744 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
746 if (irelscan < irelend)
748 /* Some other function call depends upon this register load,
749 and we have not yet converted that function call.
750 Indeed, we may never be able to convert it. There is
751 nothing else we can do at this point. */
755 /* Look for a R_SH_COUNT reloc on the location where the
756 function address is stored. Do this before deleting any
757 bytes, to avoid confusion about the address. */
758 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
759 if (irelcount->r_vaddr == paddr
760 && irelcount->r_type == R_SH_COUNT)
763 /* Delete the register load. */
764 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
767 /* That will change things, so, just in case it permits some
768 other function call to come within range, we should relax
769 again. Note that this is not required, and it may be slow. */
772 /* Now check whether we got a COUNT reloc. */
773 if (irelcount >= irelend)
775 ((*_bfd_error_handler)
776 ("%s: 0x%lx: warning: could not find expected COUNT reloc",
777 bfd_get_filename (abfd), (unsigned long) paddr));
781 /* The number of uses is stored in the r_offset field. We've
782 just deleted one use. */
783 if (irelcount->r_offset == 0)
785 ((*_bfd_error_handler) ("%s: 0x%lx: warning: bad count",
786 bfd_get_filename (abfd),
787 (unsigned long) paddr));
791 --irelcount->r_offset;
793 /* If there are no more uses, we can delete the address. Reload
794 the address from irelfn, in case it was changed by the
795 previous call to sh_relax_delete_bytes. */
796 if (irelcount->r_offset == 0)
798 if (! sh_relax_delete_bytes (abfd, sec,
799 irelfn->r_vaddr - sec->vma, 4))
803 /* We've done all we can with that function call. */
806 /* Look for load and store instructions that we can align on four
807 byte boundaries. */
812 /* Get the section contents. */
813 if (contents == NULL)
815 if (coff_section_data (abfd, sec) != NULL
816 && coff_section_data (abfd, sec)->contents != NULL)
817 contents = coff_section_data (abfd, sec)->contents;
820 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
821 if (contents == NULL)
823 free_contents = contents;
825 if (! bfd_get_section_contents (abfd, sec, contents,
826 (file_ptr) 0, sec->_raw_size))
831 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
836 if (coff_section_data (abfd, sec) == NULL)
839 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
840 if (sec->used_by_bfd == NULL)
844 coff_section_data (abfd, sec)->relocs = internal_relocs;
845 coff_section_data (abfd, sec)->keep_relocs = true;
848 coff_section_data (abfd, sec)->contents = contents;
849 coff_section_data (abfd, sec)->keep_contents = true;
850 free_contents = NULL;
852 obj_coff_keep_syms (abfd) = true;
856 if (free_relocs != NULL)
862 if (free_contents != NULL)
864 if (! link_info->keep_memory)
865 free (free_contents);
868 /* Cache the section contents for coff_link_input_bfd. */
869 if (coff_section_data (abfd, sec) == NULL)
872 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
873 if (sec->used_by_bfd == NULL)
875 coff_section_data (abfd, sec)->relocs = NULL;
877 coff_section_data (abfd, sec)->contents = contents;
884 if (free_relocs != NULL)
886 if (free_contents != NULL)
887 free (free_contents);
891 /* Delete some bytes from a section while relaxing. */
894 sh_relax_delete_bytes (abfd, sec, addr, count)
901 struct internal_reloc *irel, *irelend;
902 struct internal_reloc *irelalign;
904 bfd_byte *esym, *esymend;
905 bfd_size_type symesz;
906 struct coff_link_hash_entry **sym_hash;
909 contents = coff_section_data (abfd, sec)->contents;
911 /* The deletion must stop at the next ALIGN reloc for an alignment
912 power larger than the number of bytes we are deleting. */
915 toaddr = sec->_cooked_size;
917 irel = coff_section_data (abfd, sec)->relocs;
918 irelend = irel + sec->reloc_count;
919 for (; irel < irelend; irel++)
921 if (irel->r_type == R_SH_ALIGN
922 && irel->r_vaddr - sec->vma > addr
923 && count < (1 << irel->r_offset))
926 toaddr = irel->r_vaddr - sec->vma;
931 /* Actually delete the bytes. */
932 memmove (contents + addr, contents + addr + count, toaddr - addr - count);
933 if (irelalign == NULL)
934 sec->_cooked_size -= count;
939 #define NOP_OPCODE (0x0009)
941 BFD_ASSERT ((count & 1) == 0);
942 for (i = 0; i < count; i += 2)
943 bfd_put_16 (abfd, NOP_OPCODE, contents + toaddr - count + i);
946 /* Adjust all the relocs. */
947 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
949 bfd_vma nraddr, stop;
952 struct internal_syment sym;
953 int off, adjust, oinsn;
954 bfd_signed_vma voff = 0;
957 /* Get the new reloc address. */
958 nraddr = irel->r_vaddr - sec->vma;
959 if ((irel->r_vaddr - sec->vma > addr
960 && irel->r_vaddr - sec->vma < toaddr)
961 || (irel->r_type == R_SH_ALIGN
962 && irel->r_vaddr - sec->vma == toaddr))
965 /* See if this reloc was for the bytes we have deleted, in which
966 case we no longer care about it. Don't delete relocs which
967 represent addresses, though. */
968 if (irel->r_vaddr - sec->vma >= addr
969 && irel->r_vaddr - sec->vma < addr + count
970 && irel->r_type != R_SH_ALIGN
971 && irel->r_type != R_SH_CODE
972 && irel->r_type != R_SH_DATA
973 && irel->r_type != R_SH_LABEL)
974 irel->r_type = R_SH_UNUSED;
976 /* If this is a PC relative reloc, see if the range it covers
977 includes the bytes we have deleted. */
978 switch (irel->r_type)
983 case R_SH_PCDISP8BY2:
985 case R_SH_PCRELIMM8BY2:
986 case R_SH_PCRELIMM8BY4:
987 start = irel->r_vaddr - sec->vma;
988 insn = bfd_get_16 (abfd, contents + nraddr);
992 switch (irel->r_type)
999 /* If this reloc is against a symbol defined in this
1000 section, and the symbol will not be adjusted below, we
1001 must check the addend to see whether it will put the value in
1002 range to be adjusted, and hence must be changed. */
1003 bfd_coff_swap_sym_in (abfd,
1004 ((bfd_byte *) obj_coff_external_syms (abfd)
1006 * bfd_coff_symesz (abfd))),
1008 if (sym.n_sclass != C_EXT
1009 && sym.n_scnum == sec->target_index
1010 && ((bfd_vma) sym.n_value <= addr
1011 || (bfd_vma) sym.n_value >= toaddr))
1015 val = bfd_get_32 (abfd, contents + nraddr);
1017 if (val > addr && val < toaddr)
1018 bfd_put_32 (abfd, val - count, contents + nraddr);
1020 start = stop = addr;
1023 case R_SH_PCDISP8BY2:
1027 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1031 bfd_coff_swap_sym_in (abfd,
1032 ((bfd_byte *) obj_coff_external_syms (abfd)
1034 * bfd_coff_symesz (abfd))),
1036 if (sym.n_sclass == C_EXT)
1037 start = stop = addr;
1043 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1047 case R_SH_PCRELIMM8BY2:
1049 stop = start + 4 + off * 2;
1052 case R_SH_PCRELIMM8BY4:
1054 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1060 /* These reloc types represent
1061 .word L2-L1
1062 The r_offset field holds the difference between the reloc
1063 address and L1. That is the start of the reloc, and
1064 adding in the contents gives us the top. We must adjust
1065 both the r_offset field and the section contents. */
1067 start = irel->r_vaddr - sec->vma;
1068 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1072 && (stop <= addr || stop >= toaddr))
1073 irel->r_offset += count;
1074 else if (stop > addr
1076 && (start <= addr || start >= toaddr))
1077 irel->r_offset -= count;
1081 if (irel->r_type == R_SH_SWITCH16)
1082 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1083 else if (irel->r_type == R_SH_SWITCH8)
1084 voff = bfd_get_8 (abfd, contents + nraddr);
1086 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1087 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1092 start = irel->r_vaddr - sec->vma;
1093 stop = (bfd_vma) ((bfd_signed_vma) start
1094 + (long) irel->r_offset
1101 && (stop <= addr || stop >= toaddr))
1103 else if (stop > addr
1105 && (start <= addr || start >= toaddr))
1114 switch (irel->r_type)
1120 case R_SH_PCDISP8BY2:
1121 case R_SH_PCRELIMM8BY2:
1123 if ((oinsn & 0xff00) != (insn & 0xff00))
1125 bfd_put_16 (abfd, insn, contents + nraddr);
1130 if ((oinsn & 0xf000) != (insn & 0xf000))
1132 bfd_put_16 (abfd, insn, contents + nraddr);
1135 case R_SH_PCRELIMM8BY4:
1136 BFD_ASSERT (adjust == count || count >= 4);
1141 if ((irel->r_vaddr & 3) == 0)
1144 if ((oinsn & 0xff00) != (insn & 0xff00))
1146 bfd_put_16 (abfd, insn, contents + nraddr);
1151 if (voff < 0 || voff >= 0xff)
1153 bfd_put_8 (abfd, voff, contents + nraddr);
1158 if (voff < - 0x8000 || voff >= 0x8000)
1160 bfd_put_signed_16 (abfd, voff, contents + nraddr);
1165 bfd_put_signed_32 (abfd, voff, contents + nraddr);
1169 irel->r_offset += adjust;
1175 ((*_bfd_error_handler)
1176 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
1177 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
1178 bfd_set_error (bfd_error_bad_value);
1183 irel->r_vaddr = nraddr + sec->vma;
1186 /* Look through all the other sections. If they contain any IMM32
1187 relocs against internal symbols which we are not going to adjust
1188 below, we may need to adjust the addends. */
1189 for (o = abfd->sections; o != NULL; o = o->next)
1191 struct internal_reloc *internal_relocs;
1192 struct internal_reloc *irelscan, *irelscanend;
1193 bfd_byte *ocontents;
1196 || (o->flags & SEC_RELOC) == 0
1197 || o->reloc_count == 0)
1200 /* We always cache the relocs. Perhaps, if info->keep_memory is
1201 false, we should free them, if we are permitted to, when we
1202 leave sh_coff_relax_section. */
1203 internal_relocs = (_bfd_coff_read_internal_relocs
1204 (abfd, o, true, (bfd_byte *) NULL, false,
1205 (struct internal_reloc *) NULL));
1206 if (internal_relocs == NULL)
1210 irelscanend = internal_relocs + o->reloc_count;
1211 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1213 struct internal_syment sym;
1215 if (irelscan->r_type != R_SH_IMM32)
1218 bfd_coff_swap_sym_in (abfd,
1219 ((bfd_byte *) obj_coff_external_syms (abfd)
1220 + (irelscan->r_symndx
1221 * bfd_coff_symesz (abfd))),
1223 if (sym.n_sclass != C_EXT
1224 && sym.n_scnum == sec->target_index
1225 && ((bfd_vma) sym.n_value <= addr
1226 || (bfd_vma) sym.n_value >= toaddr))
1230 if (ocontents == NULL)
1232 if (coff_section_data (abfd, o)->contents != NULL)
1233 ocontents = coff_section_data (abfd, o)->contents;
1236 /* We always cache the section contents.
1237 Perhaps, if info->keep_memory is false, we
1238 should free them, if we are permitted to,
1239 when we leave sh_coff_relax_section. */
1240 ocontents = (bfd_byte *) bfd_malloc (o->_raw_size);
1241 if (ocontents == NULL)
1243 if (! bfd_get_section_contents (abfd, o, ocontents,
1247 coff_section_data (abfd, o)->contents = ocontents;
1251 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1253 if (val > addr && val < toaddr)
1254 bfd_put_32 (abfd, val - count,
1255 ocontents + irelscan->r_vaddr - o->vma);
1257 coff_section_data (abfd, o)->keep_contents = true;
1262 /* Adjusting the internal symbols will not work if something has
1263 already retrieved the generic symbols. It would be possible to
1264 make this work by adjusting the generic symbols at the same time.
1265 However, this case should not arise in normal usage. */
1266 if (obj_symbols (abfd) != NULL
1267 || obj_raw_syments (abfd) != NULL)
1269 ((*_bfd_error_handler)
1270 ("%s: fatal: generic symbols retrieved before relaxing",
1271 bfd_get_filename (abfd)));
1272 bfd_set_error (bfd_error_invalid_operation);
1276 /* Adjust all the symbols. */
1277 sym_hash = obj_coff_sym_hashes (abfd);
1278 symesz = bfd_coff_symesz (abfd);
1279 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1280 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1281 while (esym < esymend)
1283 struct internal_syment isym;
1285 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1287 if (isym.n_scnum == sec->target_index
1288 && (bfd_vma) isym.n_value > addr
1289 && (bfd_vma) isym.n_value < toaddr)
1291 isym.n_value -= count;
1293 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1295 if (*sym_hash != NULL)
1297 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1298 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1299 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1300 && (*sym_hash)->root.u.def.value < toaddr);
1301 (*sym_hash)->root.u.def.value -= count;
1305 esym += (isym.n_numaux + 1) * symesz;
1306 sym_hash += isym.n_numaux + 1;
1309 /* See if we can move the ALIGN reloc forward. We have adjusted
1310 r_vaddr for it already. */
1311 if (irelalign != NULL)
1313 bfd_vma alignto, alignaddr;
1315 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1316 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1317 1 << irelalign->r_offset);
1318 if (alignto != alignaddr)
1320 /* Tail recursion. */
1321 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1322 alignto - alignaddr);
1329 /* This is yet another version of the SH opcode table, used to rapidly
1330 get information about a particular instruction. */
1332 /* The opcode map is represented by an array of these structures. The
1333 array is indexed by the high order four bits in the instruction. */
1335 struct sh_major_opcode
1337 /* A pointer to the instruction list. This is an array which
1338 contains all the instructions with this major opcode. */
1339 const struct sh_minor_opcode *minor_opcodes;
1340 /* The number of elements in minor_opcodes. */
1341 unsigned short count;
1344 /* This structure holds information for a set of SH opcodes. The
1345 instruction code is anded with the mask value, and the resulting
1346 value is used to search the sorted opcode list. */
1348 struct sh_minor_opcode
1350 /* The sorted opcode list. */
1351 const struct sh_opcode *opcodes;
1352 /* The number of elements in opcodes. */
1353 unsigned short count;
1354 /* The mask value to use when searching the opcode list. */
1355 unsigned short mask;
1358 /* This structure holds information for an SH instruction. An array
1359 of these structures is sorted in order by opcode. */
1363 /* The code for this instruction, after it has been anded with the
1364 mask value in the sh_major_opcode structure. */
1365 unsigned short opcode;
1366 /* Flags for this instruction. */
1367 unsigned short flags;
1370 /* Flags which appear in the sh_opcode structure. */
1372 /* This instruction loads a value from memory. */
1373 #define LOAD (0x1)
1375 /* This instruction stores a value to memory. */
1376 #define STORE (0x2)
1378 /* This instruction is a branch. */
1379 #define BRANCH (0x4)
1381 /* This instruction has a delay slot. */
1382 #define DELAY (0x8)
1384 /* This instruction uses the value in the register in the field at
1385 mask 0x0f00 of the instruction. */
1386 #define USES1 (0x10)
1388 /* This instruction uses the value in the register in the field at
1389 mask 0x00f0 of the instruction. */
1390 #define USES2 (0x20)
1392 /* This instruction uses the value in register 0. */
1393 #define USESR0 (0x40)
1395 /* This instruction sets the value in the register in the field at
1396 mask 0x0f00 of the instruction. */
1397 #define SETS1 (0x80)
1399 /* This instruction sets the value in the register in the field at
1400 mask 0x00f0 of the instruction. */
1401 #define SETS2 (0x100)
1403 /* This instruction sets register 0. */
1404 #define SETSR0 (0x200)
1406 /* This instruction sets a special register. */
1407 #define SETSSP (0x400)
1409 /* This instruction uses a special register. */
1410 #define USESSP (0x800)
1412 /* This instruction uses the floating point register in the field at
1413 mask 0x0f00 of the instruction. */
1414 #define USESF1 (0x1000)
1416 /* This instruction uses the floating point register in the field at
1417 mask 0x00f0 of the instruction. */
1418 #define USESF2 (0x2000)
1420 /* This instruction uses floating point register 0. */
1421 #define USESF0 (0x4000)
1423 /* This instruction sets the floating point register in the field at
1424 mask 0x0f00 of the instruction. */
1425 #define SETSF1 (0x8000)
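/* As an example of how these flags combine (taken from the tables
   below): mov.l @rm+,rn (0x6006) is marked LOAD | SETS1 | SETS2 | USES2,
   since it loads from memory, sets rn (the 0x0f00 field), sets rm (the
   0x00f0 field, post-increment) and uses rm.  */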
1427 static boolean sh_insn_uses_reg
1428 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1429 static boolean sh_insn_uses_freg
1430 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1431 static boolean sh_insns_conflict
1432 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1433 const struct sh_opcode *));
1434 static boolean sh_load_use
1435 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1436 const struct sh_opcode *));
1438 /* The opcode maps. */
1440 #define MAP(a) a, sizeof a / sizeof a[0]
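/* For example, MAP (sh_opcode00) expands to
   sh_opcode00, sizeof sh_opcode00 / sizeof sh_opcode00[0]
   which fills in the pointer and count fields of the table
   initializers below.  */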
1442 static const struct sh_opcode sh_opcode00[] =
1444 { 0x0008, SETSSP }, /* clrt */
1445 { 0x0009, 0 }, /* nop */
1446 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1447 { 0x0018, SETSSP }, /* sett */
1448 { 0x0019, SETSSP }, /* div0u */
1449 { 0x001b, 0 }, /* sleep */
1450 { 0x0028, SETSSP }, /* clrmac */
1451 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1452 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1453 { 0x0048, SETSSP }, /* clrs */
1454 { 0x0058, SETSSP } /* sets */
1457 static const struct sh_opcode sh_opcode01[] =
1459 { 0x0002, SETS1 | USESSP }, /* stc sr,rn */
1460 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1461 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1462 { 0x0012, SETS1 | USESSP }, /* stc gbr,rn */
1463 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1464 { 0x0022, SETS1 | USESSP }, /* stc vbr,rn */
1465 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1466 { 0x0029, SETS1 | USESSP }, /* movt rn */
1467 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1468 { 0x0032, SETS1 | USESSP }, /* stc ssr,rn */
1469 { 0x0042, SETS1 | USESSP }, /* stc spc,rn */
1470 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1471 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn */
1472 { 0x0082, SETS1 | USESSP }, /* stc r0_bank,rn */
1473 { 0x0083, LOAD | USES1 }, /* pref @rn */
1474 { 0x0092, SETS1 | USESSP }, /* stc r1_bank,rn */
1475 { 0x00a2, SETS1 | USESSP }, /* stc r2_bank,rn */
1476 { 0x00b2, SETS1 | USESSP }, /* stc r3_bank,rn */
1477 { 0x00c2, SETS1 | USESSP }, /* stc r4_bank,rn */
1478 { 0x00d2, SETS1 | USESSP }, /* stc r5_bank,rn */
1479 { 0x00e2, SETS1 | USESSP }, /* stc r6_bank,rn */
1480 { 0x00f2, SETS1 | USESSP } /* stc r7_bank,rn */
1483 static const struct sh_opcode sh_opcode02[] =
1485 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1486 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1487 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1488 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1489 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1490 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1491 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1492 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1495 static const struct sh_minor_opcode sh_opcode0[] =
1497 { MAP (sh_opcode00), 0xffff },
1498 { MAP (sh_opcode01), 0xf0ff },
1499 { MAP (sh_opcode02), 0xf00f }
1502 static const struct sh_opcode sh_opcode10[] =
1504 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1507 static const struct sh_minor_opcode sh_opcode1[] =
1509 { MAP (sh_opcode10), 0xf000 }
1512 static const struct sh_opcode sh_opcode20[] =
1514 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1515 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1516 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1517 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1518 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1519 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1520 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1521 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1522 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1523 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1524 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1525 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1526 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1527 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1528 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1531 static const struct sh_minor_opcode sh_opcode2[] =
1533 { MAP (sh_opcode20), 0xf00f }
1536 static const struct sh_opcode sh_opcode30[] =
1538 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1539 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1540 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1541 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1542 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1543 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1544 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1545 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1546 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1547 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1548 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1549 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1550 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1551 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1554 static const struct sh_minor_opcode sh_opcode3[] =
1556 { MAP (sh_opcode30), 0xf00f }
1559 static const struct sh_opcode sh_opcode40[] =
1561 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1562 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1563 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1564 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l sr,@-rn */
1565 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1566 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1567 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1568 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,sr */
1569 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1570 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1571 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1572 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1573 { 0x400e, SETSSP | USES1 }, /* ldc rm,sr */
1574 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1575 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1576 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1577 { 0x4013, STORE | SETS1 | USES1 | USESSP }, /* stc.l gbr,@-rn */
1578 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1579 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1580 { 0x4017, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,gbr */
1581 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1582 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1583 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1584 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1585 { 0x401e, SETSSP | USES1 }, /* ldc rm,gbr */
1586 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1587 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1588 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1589 { 0x4023, STORE | SETS1 | USES1 | USESSP }, /* stc.l vbr,@-rn */
1590 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1591 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1592 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1593 { 0x4027, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,vbr */
1594 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1595 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1596 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1597 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1598 { 0x402e, SETSSP | USES1 }, /* ldc rm,vbr */
1599 { 0x4033, STORE | SETS1 | USES1 | USESSP }, /* stc.l ssr,@-rn */
1600 { 0x4037, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,ssr */
1601 { 0x403e, SETSSP | USES1 }, /* ldc rm,ssr */
1602 { 0x4043, STORE | SETS1 | USES1 | USESSP }, /* stc.l spc,@-rn */
1603 { 0x4047, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,spc */
1604 { 0x404e, SETSSP | USES1 }, /* ldc rm,spc */
1605 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1606 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1607 { 0x405a, SETSSP | USES1 }, /* lds.l rm,fpul */
1608 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr,@-rn */
1609 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr */
1610 { 0x406a, SETSSP | USES1 } /* lds rm,fpscr */
1613 static const struct sh_opcode sh_opcode41[] =
1615 { 0x4083, STORE | SETS1 | USES1 | USESSP }, /* stc.l rx_bank,@-rn */
1616 { 0x4087, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,rx_bank */
1617 { 0x408e, SETSSP | USES1 } /* ldc rm,rx_bank */
1620 static const struct sh_opcode sh_opcode42[] =
1622 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1623 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1624 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1627 static const struct sh_minor_opcode sh_opcode4[] =
1629 { MAP (sh_opcode40), 0xf0ff },
1630 { MAP (sh_opcode41), 0xf08f },
1631 { MAP (sh_opcode42), 0xf00f }
1634 static const struct sh_opcode sh_opcode50[] =
1636 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1639 static const struct sh_minor_opcode sh_opcode5[] =
1641 { MAP (sh_opcode50), 0xf000 }
1644 static const struct sh_opcode sh_opcode60[] =
1646 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1647 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1648 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1649 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1650 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1651 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1652 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1653 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1654 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1655 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1656 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1657 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1658 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1659 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1660 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1661 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1664 static const struct sh_minor_opcode sh_opcode6[] =
1666 { MAP (sh_opcode60), 0xf00f }
1669 static const struct sh_opcode sh_opcode70[] =
1671 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1674 static const struct sh_minor_opcode sh_opcode7[] =
1676 { MAP (sh_opcode70), 0xf000 }
1679 static const struct sh_opcode sh_opcode80[] =
1681 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1682 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1683 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1684 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rn),r0 */
1685 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1686 { 0x8900, BRANCH | USESSP }, /* bt label */
1687 { 0x8b00, BRANCH | USESSP }, /* bf label */
1688 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1689 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1692 static const struct sh_minor_opcode sh_opcode8[] =
1694 { MAP (sh_opcode80), 0xff00 }
1697 static const struct sh_opcode sh_opcode90[] =
1699 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1702 static const struct sh_minor_opcode sh_opcode9[] =
1704 { MAP (sh_opcode90), 0xf000 }
1707 static const struct sh_opcode sh_opcodea0[] =
1709 { 0xa000, BRANCH | DELAY } /* bra label */
1712 static const struct sh_minor_opcode sh_opcodea[] =
1714 { MAP (sh_opcodea0), 0xf000 }
1717 static const struct sh_opcode sh_opcodeb0[] =
1719 { 0xb000, BRANCH | DELAY } /* bsr label */
1722 static const struct sh_minor_opcode sh_opcodeb[] =
1724 { MAP (sh_opcodeb0), 0xf000 }
1727 static const struct sh_opcode sh_opcodec0[] =
1729 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
1730 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
1731 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
1732 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
1733 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
1734 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
1735 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
1736 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
1737 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
1738 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
1739 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
1740 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
1741 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
1742 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
1743 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
1744 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
1747 static const struct sh_minor_opcode sh_opcodec[] =
1749 { MAP (sh_opcodec0), 0xff00 }
1752 static const struct sh_opcode sh_opcoded0[] =
1754 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
1757 static const struct sh_minor_opcode sh_opcoded[] =
1759 { MAP (sh_opcoded0), 0xf000 }
1762 static const struct sh_opcode sh_opcodee0[] =
1764 { 0xe000, SETS1 } /* mov #imm,rn */
1767 static const struct sh_minor_opcode sh_opcodee[] =
1769 { MAP (sh_opcodee0), 0xf000 }
1772 static const struct sh_opcode sh_opcodef0[] =
1774 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
1775 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
1776 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
1777 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
1778 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
1779 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
1780 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
1781 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
1782 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
1783 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
1784 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
1785 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
1786 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
1787 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
1790 static const struct sh_opcode sh_opcodef1[] =
1792 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
1793 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
1794 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
1795 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
1796 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
1797 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
1798 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
1799 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
1800 { 0xf08d, SETSF1 }, /* fldi0 fn */
1801 { 0xf09d, SETSF1 } /* fldi1 fn */
1804 static const struct sh_minor_opcode sh_opcodef[] =
1806 { MAP (sh_opcodef0), 0xf00f },
1807 { MAP (sh_opcodef1), 0xf0ff }
1810 static const struct sh_major_opcode sh_opcodes[] =
1812 { MAP (sh_opcode0) },
1813 { MAP (sh_opcode1) },
1814 { MAP (sh_opcode2) },
1815 { MAP (sh_opcode3) },
1816 { MAP (sh_opcode4) },
1817 { MAP (sh_opcode5) },
1818 { MAP (sh_opcode6) },
1819 { MAP (sh_opcode7) },
1820 { MAP (sh_opcode8) },
1821 { MAP (sh_opcode9) },
1822 { MAP (sh_opcodea) },
1823 { MAP (sh_opcodeb) },
1824 { MAP (sh_opcodec) },
1825 { MAP (sh_opcoded) },
1826 { MAP (sh_opcodee) },
1827 { MAP (sh_opcodef) }
1830 /* Given an instruction, return a pointer to the corresponding
1831 sh_opcode structure. Return NULL if the instruction is not
1832 recognized. */
1834 static const struct sh_opcode *
1838 const struct sh_major_opcode *maj;
1839 const struct sh_minor_opcode *min, *minend;
1841 maj = &sh_opcodes[(insn & 0xf000) >> 12];
1842 min = maj->minor_opcodes;
1843 minend = min + maj->count;
1844 for (; min < minend; min++)
1847 const struct sh_opcode *op, *opend;
1849 l = insn & min->mask;
1850 op = min->opcodes;
1851 opend = op + min->count;
1853 /* Since the opcode tables are sorted, we could use a binary
1854 search here if the count were above some cutoff value. */
1855 for (; op < opend; op++)
1856 if (op->opcode == l)
1857 return op;
1863 /* See whether an instruction uses a general purpose register. */
1866 sh_insn_uses_reg (insn, op, reg)
1868 const struct sh_opcode *op;
1875 if ((f & USES1) != 0
1876 && ((insn & 0x0f00) >> 8) == reg)
1878 if ((f & USES2) != 0
1879 && ((insn & 0x00f0) >> 4) == reg)
1881 if ((f & USESR0) != 0
1888 /* See whether an instruction uses a floating point register. */
1891 sh_insn_uses_freg (insn, op, freg)
1893 const struct sh_opcode *op;
1900 /* We can't tell if this is a double-precision insn, so just play safe
1901 and assume that it might be. So not only do we have to test FREG against
1902 itself, but also an even FREG against FREG+1 - in case the using insn uses
1903 just the low part of a double precision value - and an odd
1904 FREG against FREG-1 - in case the setting insn sets just the low part
1905 of a double precision value.
1906 What this all boils down to is that we have to ignore the lowest
1907 bit of the register number. */
1909 if ((f & USESF1) != 0
1910 && ((insn & 0x0e00) >> 8) == (freg & 0xe))
1912 if ((f & USESF2) != 0
1913 && ((insn & 0x00e0) >> 4) == (freg & 0xe))
1915 if ((f & USESF0) != 0
1922 /* See whether instructions I1 and I2 conflict, assuming I1 comes
1923 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
1924 This should return true if there is a conflict, or false if the
1925 instructions can be swapped safely. */
1928 sh_insns_conflict (i1, op1, i2, op2)
1930 const struct sh_opcode *op1;
1932 const struct sh_opcode *op2;
1934 unsigned int f1, f2;
1939 /* Load of fpscr conflicts with floating point operations.
1940 FIXME: shouldn't test raw opcodes here. */
1941 if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
1942 || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
1945 if ((f1 & (BRANCH | DELAY)) != 0
1946 || (f2 & (BRANCH | DELAY)) != 0)
1949 if ((f1 & SETSSP) != 0 && (f2 & USESSP) != 0)
1951 if ((f2 & SETSSP) != 0 && (f1 & USESSP) != 0)
1954 if ((f1 & SETS1) != 0
1955 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
1957 if ((f1 & SETS2) != 0
1958 && sh_insn_uses_reg (i2, op2, (i1 & 0x00f0) >> 4))
1960 if ((f1 & SETSR0) != 0
1961 && sh_insn_uses_reg (i2, op2, 0))
1963 if ((f1 & SETSF1) != 0
1964 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
1967 if ((f2 & SETS1) != 0
1968 && sh_insn_uses_reg (i1, op1, (i2 & 0x0f00) >> 8))
1970 if ((f2 & SETS2) != 0
1971 && sh_insn_uses_reg (i1, op1, (i2 & 0x00f0) >> 4))
1973 if ((f2 & SETSR0) != 0
1974 && sh_insn_uses_reg (i1, op1, 0))
1976 if ((f2 & SETSF1) != 0
1977 && sh_insn_uses_freg (i1, op1, (i2 & 0x0f00) >> 8))
1980 /* The instructions do not conflict. */
1984 /* I1 is a load instruction, and I2 is some other instruction. Return
1985 true if I1 loads a register which I2 uses. */
1988 sh_load_use (i1, op1, i2, op2)
1990 const struct sh_opcode *op1;
1992 const struct sh_opcode *op2;
1998 if ((f1 & LOAD) == 0)
2001 /* If both SETS1 and SETSSP are set, that means a load to a special
2002 register using postincrement addressing mode, which we don't care
2003 about. */
2004 if ((f1 & SETS1) != 0
2005 && (f1 & SETSSP) == 0
2006 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2009 if ((f1 & SETSR0) != 0
2010 && sh_insn_uses_reg (i2, op2, 0))
2013 if ((f1 & SETSF1) != 0
2014 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2020 /* Try to align loads and stores within a span of memory. This is
2021 called by both the ELF and the COFF sh targets. ABFD and SEC are
2022 the BFD and section we are examining. CONTENTS is the contents of
2023 the section. SWAP is the routine to call to swap two instructions.
2024 RELOCS is a pointer to the internal relocation information, to be
2025 passed to SWAP. PLABEL is a pointer to the current label in a
2026 sorted list of labels; LABEL_END is the end of the list. START and
2027 STOP are the range of memory to examine. If a swap is made,
2028 *PSWAPPED is set to true. */
2031 _bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
2032 plabel, label_end, start, stop, pswapped)
2036 boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
2046 /* Instructions should be aligned on 2 byte boundaries. */
2047 if ((start & 1) == 1)
2050 /* Now look through the unaligned addresses. */
2054 for (; i < stop; i += 4)
2057 const struct sh_opcode *op;
2058 unsigned int prev_insn = 0;
2059 const struct sh_opcode *prev_op = NULL;
2061 insn = bfd_get_16 (abfd, contents + i);
2062 op = sh_insn_info (insn);
2064 || (op->flags & (LOAD | STORE)) == 0)
2067 /* This is a load or store which is not on a four byte boundary. */
2069 while (*plabel < label_end && **plabel < i)
2074 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2075 prev_op = sh_insn_info (prev_insn);
2077 /* If the load/store instruction is in a delay slot, we
2078 can't swap. */
2079 if (prev_op == NULL
2080 || (prev_op->flags & DELAY) != 0)
2084 && (*plabel >= label_end || **plabel != i)
2086 && (prev_op->flags & (LOAD | STORE)) == 0
2087 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2091 /* The load/store instruction does not have a label, and
2092 there is a previous instruction; PREV_INSN is not
2093 itself a load/store instruction, and PREV_INSN and
2094 INSN do not conflict. */
2100 unsigned int prev2_insn;
2101 const struct sh_opcode *prev2_op;
2103 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2104 prev2_op = sh_insn_info (prev2_insn);
2106 /* If the instruction before PREV_INSN has a delay
2107 slot--that is, PREV_INSN is in a delay slot--we cannot swap. */
2109 if (prev2_op == NULL
2110 || (prev2_op->flags & DELAY) != 0)
2113 /* If the instruction before PREV_INSN is a load,
2114 and it sets a register which INSN uses, then
2115 putting INSN immediately after PREV_INSN will
2116 cause a pipeline bubble, so there is no point to making the swap. */
2119 && (prev2_op->flags & LOAD) != 0
2120 && sh_load_use (prev2_insn, prev2_op, insn, op))
2126 if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2133 while (*plabel < label_end && **plabel < i + 2)
2137 && (*plabel >= label_end || **plabel != i + 2))
2139 unsigned int next_insn;
2140 const struct sh_opcode *next_op;
2142 /* There is an instruction after the load/store
2143 instruction, and it does not have a label. */
2144 next_insn = bfd_get_16 (abfd, contents + i + 2);
2145 next_op = sh_insn_info (next_insn);
2147 && (next_op->flags & (LOAD | STORE)) == 0
2148 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2152 /* NEXT_INSN is not itself a load/store instruction,
2153 and it does not conflict with INSN. */
2157 /* If PREV_INSN is a load, and it sets a register
2158 which NEXT_INSN uses, then putting NEXT_INSN
2159 immediately after PREV_INSN will cause a pipeline
2160 bubble, so there is no reason to make this swap. */
2162 && (prev_op->flags & LOAD) != 0
2163 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2166 /* If INSN is a load, and it sets a register which
2167 the insn after NEXT_INSN uses, then doing the
2168 swap will cause a pipeline bubble, so there is no
2169 reason to make the swap. However, if the insn
2170 after NEXT_INSN is itself a load or store
2171 instruction, then it is misaligned, so
2172 optimistically hope that it will be swapped
2173 itself, and just live with the pipeline bubble if necessary. */
2177 && (op->flags & LOAD) != 0)
2179 unsigned int next2_insn;
2180 const struct sh_opcode *next2_op;
2182 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2183 next2_op = sh_insn_info (next2_insn);
2184 if ((next2_op->flags & (LOAD | STORE)) == 0
2185 && sh_load_use (insn, op, next2_insn, next2_op))
2191 if (! (*swap) (abfd, sec, relocs, contents, i))
2203 /* Look for loads and stores which we can align to four byte
2204 boundaries. See the longer comment above sh_relax_section for why
2205 this is desirable. This sets *PSWAPPED if some instruction was swapped. */
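/* For example (a sketch): a reloc stream with R_SH_CODE at offset 0,
   R_SH_LABEL at offset 8 and R_SH_DATA at offset 0x40 describes
   instructions in [0, 0x40) with a label at 8 which no swap may
   cross; if no R_SH_DATA follows, the span runs to the end of the
   section.  */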
2209 sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
2212 struct internal_reloc *internal_relocs;
2216 struct internal_reloc *irel, *irelend;
2217 bfd_vma *labels = NULL;
2218 bfd_vma *label, *label_end;
2222 irelend = internal_relocs + sec->reloc_count;
2224 /* Get all the addresses with labels on them. */
2225 labels = (bfd_vma *) bfd_malloc (sec->reloc_count * sizeof (bfd_vma));
2229 for (irel = internal_relocs; irel < irelend; irel++)
2231 if (irel->r_type == R_SH_LABEL)
2233 *label_end = irel->r_vaddr - sec->vma;
2238 /* Note that the assembler currently always outputs relocs in
2239 address order. If that ever changes, this code will need to sort
2240 the label values and the relocs. */
2244 for (irel = internal_relocs; irel < irelend; irel++)
2246 bfd_vma start, stop;
2248 if (irel->r_type != R_SH_CODE)
2251 start = irel->r_vaddr - sec->vma;
2253 for (irel++; irel < irelend; irel++)
2254 if (irel->r_type == R_SH_DATA)
2257 stop = irel->r_vaddr - sec->vma;
2259 stop = sec->_cooked_size;
2261 if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2262 (PTR) internal_relocs, &label,
2263 label_end, start, stop, pswapped))
2277 /* Swap two SH instructions. */
2280 sh_swap_insns (abfd, sec, relocs, contents, addr)
2287 struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2288 unsigned short i1, i2;
2289 struct internal_reloc *irel, *irelend;
2291 /* Swap the instructions themselves. */
2292 i1 = bfd_get_16 (abfd, contents + addr);
2293 i2 = bfd_get_16 (abfd, contents + addr + 2);
2294 bfd_put_16 (abfd, i2, contents + addr);
2295 bfd_put_16 (abfd, i1, contents + addr + 2);
2297 /* Adjust all reloc addresses. */
2298 irelend = internal_relocs + sec->reloc_count;
2299 for (irel = internal_relocs; irel < irelend; irel++)
2303 /* There are a few special types of relocs that we don't want to
2304 adjust. These relocs do not apply to the instruction itself,
2305 but are only associated with the address. */
2306 type = irel->r_type;
2307 if (type == R_SH_ALIGN
2308 || type == R_SH_CODE
2309 || type == R_SH_DATA
2310 || type == R_SH_LABEL)
2313 /* If an R_SH_USES reloc points to one of the addresses being
2314 swapped, we must adjust it. It would be incorrect to do this
2315 for a jump, though, since we want to execute both
2316 instructions after the jump. (We have avoided swapping
2317 around a label, so the jump will not wind up executing an
2318 instruction it shouldn't). */
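/* For instance (a sketch): if the swapped instructions are at
   section offsets 0x100 and 0x102, an R_SH_USES reloc whose
   r_vaddr - sec->vma + 4 + r_offset works out to 0x100 has its
   r_offset increased by two, since the instruction it points at is
   now at 0x102; one that works out to 0x102 has it decreased by
   two.  */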
2319 if (type == R_SH_USES)
2323 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2325 irel->r_offset += 2;
2326 else if (off == addr + 2)
2327 irel->r_offset -= 2;
2330 if (irel->r_vaddr - sec->vma == addr)
2335 else if (irel->r_vaddr - sec->vma == addr + 2)
2346 unsigned short insn, oinsn;
2349 loc = contents + irel->r_vaddr - sec->vma;
2356 case R_SH_PCDISP8BY2:
2357 case R_SH_PCRELIMM8BY2:
2358 insn = bfd_get_16 (abfd, loc);
2361 if ((oinsn & 0xff00) != (insn & 0xff00))
2363 bfd_put_16 (abfd, insn, loc);
2367 insn = bfd_get_16 (abfd, loc);
2370 if ((oinsn & 0xf000) != (insn & 0xf000))
2372 bfd_put_16 (abfd, insn, loc);
2375 case R_SH_PCRELIMM8BY4:
2376 /* This reloc ignores the least significant two bits of
2377 the program counter before adding in the offset.
2378 This means that if ADDR is at an even address, the
2379 swap will not affect the offset. If ADDR is at an
2380 odd address, then the instruction will be crossing a
2381 four byte boundary, and must be adjusted. */
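/* A worked example (sketch), using the SH rule that the scaled
   displacement is added to the instruction's address plus four
   with the low two bits masked off: instructions at offsets
   0x104 and 0x106 both use base 0x108, so a swap at a four byte
   boundary needs no fixup; an instruction at 0x102 uses base
   0x104 but, once moved to 0x104, uses 0x108, so its stored
   displacement must drop by one longword.  */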
2382 if ((addr & 3) != 0)
2384 insn = bfd_get_16 (abfd, loc);
2387 if ((oinsn & 0xff00) != (insn & 0xff00))
2389 bfd_put_16 (abfd, insn, loc);
2397 ((*_bfd_error_handler)
2398 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
2399 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
2400 bfd_set_error (bfd_error_bad_value);
2409 /* This is a modification of _bfd_coff_generic_relocate_section, which
2410 will handle SH relaxing. */
2413 sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2414 relocs, syms, sections)
2415 bfd *output_bfd ATTRIBUTE_UNUSED;
2416 struct bfd_link_info *info;
2418 asection *input_section;
2420 struct internal_reloc *relocs;
2421 struct internal_syment *syms;
2422 asection **sections;
2424 struct internal_reloc *rel;
2425 struct internal_reloc *relend;
2428 relend = rel + input_section->reloc_count;
2429 for (; rel < relend; rel++)
2432 struct coff_link_hash_entry *h;
2433 struct internal_syment *sym;
2436 reloc_howto_type *howto;
2437 bfd_reloc_status_type rstat;
2439 /* Almost all relocs have to do with relaxing. If any work must
2440 be done for them, it has been done in sh_relax_section. */
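/* (For instance, the R_SH_USES, R_SH_LABEL, R_SH_CODE, R_SH_DATA and
   R_SH_ALIGN relocs used during relaxation describe code layout
   rather than instruction fields, so there is nothing to patch for
   them here.)  */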
2441 if (rel->r_type != R_SH_IMM32
2442 && rel->r_type != R_SH_PCDISP)
2445 symndx = rel->r_symndx;
2455 || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2457 (*_bfd_error_handler)
2458 ("%s: illegal symbol index %ld in relocs",
2459 bfd_get_filename (input_bfd), symndx);
2460 bfd_set_error (bfd_error_bad_value);
2463 h = obj_coff_sym_hashes (input_bfd)[symndx];
2464 sym = syms + symndx;
2467 if (sym != NULL && sym->n_scnum != 0)
2468 addend = - sym->n_value;
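/* (A sketch of the usual COFF convention: the section contents were
   assembled against the symbol's original value, so this negative
   addend cancels the n_value folded into VAL below and only the
   amount by which the symbol moved is added in.)  */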
2472 if (rel->r_type == R_SH_PCDISP)
2475 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2478 howto = &sh_coff_howtos[rel->r_type];
2482 bfd_set_error (bfd_error_bad_value);
2492 /* There is nothing to do for an internal PCDISP reloc. */
2493 if (rel->r_type == R_SH_PCDISP)
2498 sec = bfd_abs_section_ptr;
2503 sec = sections[symndx];
2504 val = (sec->output_section->vma
2505 + sec->output_offset
2512 if (h->root.type == bfd_link_hash_defined
2513 || h->root.type == bfd_link_hash_defweak)
2517 sec = h->root.u.def.section;
2518 val = (h->root.u.def.value
2519 + sec->output_section->vma
2520 + sec->output_offset);
2522 else if (! info->relocateable)
2524 if (! ((*info->callbacks->undefined_symbol)
2525 (info, h->root.root.string, input_bfd, input_section,
2526 rel->r_vaddr - input_section->vma)))
2531 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2533 rel->r_vaddr - input_section->vma,
2542 case bfd_reloc_overflow:
2545 char buf[SYMNMLEN + 1];
2550 name = h->root.root.string;
2551 else if (sym->_n._n_n._n_zeroes == 0
2552 && sym->_n._n_n._n_offset != 0)
2553 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2556 strncpy (buf, sym->_n._n_name, SYMNMLEN);
2557 buf[SYMNMLEN] = '\0';
2561 if (! ((*info->callbacks->reloc_overflow)
2562 (info, name, howto->name, (bfd_vma) 0, input_bfd,
2563 input_section, rel->r_vaddr - input_section->vma)))
2572 /* This is a version of bfd_generic_get_relocated_section_contents
2573 which uses sh_relocate_section. */
2576 sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
2577 data, relocateable, symbols)
2579 struct bfd_link_info *link_info;
2580 struct bfd_link_order *link_order;
2582 boolean relocateable;
2585 asection *input_section = link_order->u.indirect.section;
2586 bfd *input_bfd = input_section->owner;
2587 asection **sections = NULL;
2588 struct internal_reloc *internal_relocs = NULL;
2589 struct internal_syment *internal_syms = NULL;
2591 /* We only need to handle the case of relaxing, or of having a
2592 particular set of section contents, specially. */
2594 || coff_section_data (input_bfd, input_section) == NULL
2595 || coff_section_data (input_bfd, input_section)->contents == NULL)
2596 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2601 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2602 input_section->_raw_size);
2604 if ((input_section->flags & SEC_RELOC) != 0
2605 && input_section->reloc_count > 0)
2607 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2608 bfd_byte *esym, *esymend;
2609 struct internal_syment *isymp;
2612 if (! _bfd_coff_get_external_symbols (input_bfd))
2615 internal_relocs = (_bfd_coff_read_internal_relocs
2616 (input_bfd, input_section, false, (bfd_byte *) NULL,
2617 false, (struct internal_reloc *) NULL));
2618 if (internal_relocs == NULL)
2621 internal_syms = ((struct internal_syment *)
2622 bfd_malloc (obj_raw_syment_count (input_bfd)
2623 * sizeof (struct internal_syment)));
2624 if (internal_syms == NULL)
2627 sections = (asection **) bfd_malloc (obj_raw_syment_count (input_bfd)
2628 * sizeof (asection *));
2629 if (sections == NULL)
2632 isymp = internal_syms;
2634 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2635 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
2636 while (esym < esymend)
2638 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
2640 if (isymp->n_scnum != 0)
2641 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
2644 if (isymp->n_value == 0)
2645 *secpp = bfd_und_section_ptr;
2647 *secpp = bfd_com_section_ptr;
2650 esym += (isymp->n_numaux + 1) * symesz;
2651 secpp += isymp->n_numaux + 1;
2652 isymp += isymp->n_numaux + 1;
2655 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
2656 input_section, data, internal_relocs,
2657 internal_syms, sections))
2662 free (internal_syms);
2663 internal_syms = NULL;
2664 free (internal_relocs);
2665 internal_relocs = NULL;
2671 if (internal_relocs != NULL)
2672 free (internal_relocs);
2673 if (internal_syms != NULL)
2674 free (internal_syms);
2675 if (sections != NULL)
2680 /* The target vectors. */
2682 CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL)
2684 #ifdef TARGET_SHL_SYM
2685 #define TARGET_SYM TARGET_SHL_SYM
2687 #define TARGET_SYM shlcoff_vec
2690 #ifndef TARGET_SHL_NAME
2691 #define TARGET_SHL_NAME "coff-shl"
2694 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE, 0, '_', NULL)
2697 /* Some people want versions of the SH COFF target which do not align
2698 to 16 byte boundaries. We implement that by adding a couple of new
2699 target vectors. These are just like the ones above, but they
2700 change the default section alignment. To generate them in the
2701 assembler, use -small. To use them in the linker, use -b
2702 coff-sh{l}-small and -oformat coff-sh{l}-small.
2704 Yes, this is a horrible hack. A general solution for setting
2705 section alignment in COFF is rather complex. ELF handles this fine. */
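/* For example (an illustrative sketch; foo.o is just a placeholder
   object file):

     ld -b coff-sh-small -oformat coff-sh-small foo.o -o foo

   reads the input and writes the output using these small-alignment
   vectors.  */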
2708 /* Only recognize the small versions if the target was not defaulted.
2709 Otherwise we won't recognize the non-default endianness. */
2711 static const bfd_target *
2712 coff_small_object_p (abfd)
2715 if (abfd->target_defaulted)
2717 bfd_set_error (bfd_error_wrong_format);
2720 return coff_object_p (abfd);
2723 /* Set the section alignment for the small versions. */
2726 coff_small_new_section_hook (abfd, section)
2730 if (! coff_new_section_hook (abfd, section))
2733 /* We must align to at least a four byte boundary, because longword
2734 accesses must be on a four byte boundary. */
2735 if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
2736 section->alignment_power = 2;
2741 /* This is copied from bfd_coff_std_swap_table so that we can change
2742 the default section alignment power. */
2744 static const bfd_coff_backend_data bfd_coff_small_swap_table =
2746 coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
2747 coff_swap_aux_out, coff_swap_sym_out,
2748 coff_swap_lineno_out, coff_swap_reloc_out,
2749 coff_swap_filehdr_out, coff_swap_aouthdr_out,
2750 coff_swap_scnhdr_out,
2751 FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ,
2752 #ifdef COFF_LONG_FILENAMES
2757 #ifdef COFF_LONG_SECTION_NAMES
2763 coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
2764 coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
2765 coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
2766 coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
2767 coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
2768 coff_classify_symbol, coff_compute_section_file_positions,
2769 coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
2770 coff_adjust_symndx, coff_link_add_one_symbol,
2771 coff_link_output_has_begun, coff_final_link_postscript
2774 #define coff_small_close_and_cleanup \
2775 coff_close_and_cleanup
2776 #define coff_small_bfd_free_cached_info \
2777 coff_bfd_free_cached_info
2778 #define coff_small_get_section_contents \
2779 coff_get_section_contents
2780 #define coff_small_get_section_contents_in_window \
2781 coff_get_section_contents_in_window
2783 extern const bfd_target shlcoff_small_vec;
2785 const bfd_target shcoff_small_vec =
2787 "coff-sh-small", /* name */
2788 bfd_target_coff_flavour,
2789 BFD_ENDIAN_BIG, /* data byte order is big */
2790 BFD_ENDIAN_BIG, /* header byte order is big */
2792 (HAS_RELOC | EXEC_P | /* object flags */
2793 HAS_LINENO | HAS_DEBUG |
2794 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
2796 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
2797 '_', /* leading symbol underscore */
2798 '/', /* ar_pad_char */
2799 15, /* ar_max_namelen */
2800 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
2801 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
2802 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
2803 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
2804 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
2805 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
2807 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
2808 bfd_generic_archive_p, _bfd_dummy_target},
2809 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
2811 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
2812 _bfd_write_archive_contents, bfd_false},
2814 BFD_JUMP_TABLE_GENERIC (coff_small),
2815 BFD_JUMP_TABLE_COPY (coff),
2816 BFD_JUMP_TABLE_CORE (_bfd_nocore),
2817 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
2818 BFD_JUMP_TABLE_SYMBOLS (coff),
2819 BFD_JUMP_TABLE_RELOCS (coff),
2820 BFD_JUMP_TABLE_WRITE (coff),
2821 BFD_JUMP_TABLE_LINK (coff),
2822 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
2824 & shlcoff_small_vec,
2826 (PTR) &bfd_coff_small_swap_table
2829 const bfd_target shlcoff_small_vec =
2831 "coff-shl-small", /* name */
2832 bfd_target_coff_flavour,
2833 BFD_ENDIAN_LITTLE, /* data byte order is little */
2834 BFD_ENDIAN_LITTLE, /* header byte order is little */
2836 (HAS_RELOC | EXEC_P | /* object flags */
2837 HAS_LINENO | HAS_DEBUG |
2838 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
2840 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
2841 '_', /* leading symbol underscore */
2842 '/', /* ar_pad_char */
2843 15, /* ar_max_namelen */
2844 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
2845 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
2846 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
2847 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
2848 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
2849 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
2851 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
2852 bfd_generic_archive_p, _bfd_dummy_target},
2853 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
2855 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
2856 _bfd_write_archive_contents, bfd_false},
2858 BFD_JUMP_TABLE_GENERIC (coff_small),
2859 BFD_JUMP_TABLE_COPY (coff),
2860 BFD_JUMP_TABLE_CORE (_bfd_nocore),
2861 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
2862 BFD_JUMP_TABLE_SYMBOLS (coff),
2863 BFD_JUMP_TABLE_RELOCS (coff),
2864 BFD_JUMP_TABLE_WRITE (coff),
2865 BFD_JUMP_TABLE_LINK (coff),
2866 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
2870 (PTR) &bfd_coff_small_swap_table