1 /* BFD back-end for Hitachi Super-H COFF binaries.
2 Copyright 1993, 94, 95, 96, 97, 98, 1999, 2000 Free Software Foundation, Inc.
3 Contributed by Cygnus Support.
4 Written by Steve Chamberlain, <sac@cygnus.com>.
5 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
7 This file is part of BFD, the Binary File Descriptor library.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
28 #include "coff/internal.h"
33 #ifndef COFF_IMAGE_WITH_PE
34 static boolean sh_align_load_span
35 PARAMS ((bfd *, asection *, bfd_byte *,
36 boolean (*) (bfd *, asection *, PTR, bfd_byte *, bfd_vma),
37 PTR, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, boolean *));
39 #define _bfd_sh_align_load_span sh_align_load_span
45 /* Internal functions. */
46 static bfd_reloc_status_type sh_reloc
47 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
48 static long get_symbol_value PARAMS ((asymbol *));
49 static boolean sh_relax_section
50 PARAMS ((bfd *, asection *, struct bfd_link_info *, boolean *));
51 static boolean sh_relax_delete_bytes
52 PARAMS ((bfd *, asection *, bfd_vma, int));
53 #ifndef COFF_IMAGE_WITH_PE
54 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
56 static boolean sh_align_loads
57 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *, boolean *));
58 static boolean sh_swap_insns
59 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
60 static boolean sh_relocate_section
61 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
62 struct internal_reloc *, struct internal_syment *, asection **));
63 static bfd_byte *sh_coff_get_relocated_section_contents
64 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
65 bfd_byte *, boolean, asymbol **));
#ifdef COFF_WITH_PE
68 /* Can't build import tables with 2**4 alignment. */
69 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 2
#else
71 /* Default section alignment to 2**4. */
72 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 4
#endif
75 #ifdef COFF_IMAGE_WITH_PE
76 /* Align PE executables. */
77 #define COFF_PAGE_SIZE 0x1000
80 /* Generate long file names. */
81 #define COFF_LONG_FILENAMES
84 /* Return true if this relocation should
85 appear in the output .reloc section. */
86 static boolean in_reloc_p (abfd, howto)
87 bfd * abfd ATTRIBUTE_UNUSED;
88 reloc_howto_type * howto;
90 return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
94 /* The supported relocations. There are a lot of relocations defined
95 in coff/internal.h which we do not expect to ever see. */
96 static reloc_howto_type sh_coff_howtos[] =
102 HOWTO (R_SH_IMM32CE, /* type */
104 2, /* size (0 = byte, 1 = short, 2 = long) */
106 false, /* pc_relative */
108 complain_overflow_bitfield, /* complain_on_overflow */
109 sh_reloc, /* special_function */
110 "r_imm32ce", /* name */
111 true, /* partial_inplace */
112 0xffffffff, /* src_mask */
113 0xffffffff, /* dst_mask */
114 false), /* pcrel_offset */
118 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
119 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
120 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
121 EMPTY_HOWTO (6), /* R_SH_IMM24 */
122 EMPTY_HOWTO (7), /* R_SH_LOW16 */
124 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
126 HOWTO (R_SH_PCDISP8BY2, /* type */
128 1, /* size (0 = byte, 1 = short, 2 = long) */
130 true, /* pc_relative */
132 complain_overflow_signed, /* complain_on_overflow */
133 sh_reloc, /* special_function */
134 "r_pcdisp8by2", /* name */
135 true, /* partial_inplace */
138 true), /* pcrel_offset */
140 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
142 HOWTO (R_SH_PCDISP, /* type */
144 1, /* size (0 = byte, 1 = short, 2 = long) */
146 true, /* pc_relative */
148 complain_overflow_signed, /* complain_on_overflow */
149 sh_reloc, /* special_function */
150 "r_pcdisp12by2", /* name */
151 true, /* partial_inplace */
152 0xfff, /* src_mask */
153 0xfff, /* dst_mask */
154 true), /* pcrel_offset */
158 HOWTO (R_SH_IMM32, /* type */
160 2, /* size (0 = byte, 1 = short, 2 = long) */
162 false, /* pc_relative */
164 complain_overflow_bitfield, /* complain_on_overflow */
165 sh_reloc, /* special_function */
166 "r_imm32", /* name */
167 true, /* partial_inplace */
168 0xffffffff, /* src_mask */
169 0xffffffff, /* dst_mask */
170 false), /* pcrel_offset */
174 HOWTO (R_SH_IMAGEBASE, /* type */
176 2, /* size (0 = byte, 1 = short, 2 = long) */
178 false, /* pc_relative */
180 complain_overflow_bitfield, /* complain_on_overflow */
181 sh_reloc, /* special_function */
183 true, /* partial_inplace */
184 0xffffffff, /* src_mask */
185 0xffffffff, /* dst_mask */
186 false), /* pcrel_offset */
188 EMPTY_HOWTO (16), /* R_SH_IMM8 */
190 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
191 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
192 EMPTY_HOWTO (19), /* R_SH_IMM4 */
193 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
194 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
196 HOWTO (R_SH_PCRELIMM8BY2, /* type */
198 1, /* size (0 = byte, 1 = short, 2 = long) */
200 true, /* pc_relative */
202 complain_overflow_unsigned, /* complain_on_overflow */
203 sh_reloc, /* special_function */
204 "r_pcrelimm8by2", /* name */
205 true, /* partial_inplace */
208 true), /* pcrel_offset */
210 HOWTO (R_SH_PCRELIMM8BY4, /* type */
212 1, /* size (0 = byte, 1 = short, 2 = long) */
214 true, /* pc_relative */
216 complain_overflow_unsigned, /* complain_on_overflow */
217 sh_reloc, /* special_function */
218 "r_pcrelimm8by4", /* name */
219 true, /* partial_inplace */
222 true), /* pcrel_offset */
224 HOWTO (R_SH_IMM16, /* type */
226 1, /* size (0 = byte, 1 = short, 2 = long) */
228 false, /* pc_relative */
230 complain_overflow_bitfield, /* complain_on_overflow */
231 sh_reloc, /* special_function */
232 "r_imm16", /* name */
233 true, /* partial_inplace */
234 0xffff, /* src_mask */
235 0xffff, /* dst_mask */
236 false), /* pcrel_offset */
238 HOWTO (R_SH_SWITCH16, /* type */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
242 false, /* pc_relative */
244 complain_overflow_bitfield, /* complain_on_overflow */
245 sh_reloc, /* special_function */
246 "r_switch16", /* name */
247 true, /* partial_inplace */
248 0xffff, /* src_mask */
249 0xffff, /* dst_mask */
250 false), /* pcrel_offset */
252 HOWTO (R_SH_SWITCH32, /* type */
254 2, /* size (0 = byte, 1 = short, 2 = long) */
256 false, /* pc_relative */
258 complain_overflow_bitfield, /* complain_on_overflow */
259 sh_reloc, /* special_function */
260 "r_switch32", /* name */
261 true, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 false), /* pcrel_offset */
266 HOWTO (R_SH_USES, /* type */
268 1, /* size (0 = byte, 1 = short, 2 = long) */
270 false, /* pc_relative */
272 complain_overflow_bitfield, /* complain_on_overflow */
273 sh_reloc, /* special_function */
275 true, /* partial_inplace */
276 0xffff, /* src_mask */
277 0xffff, /* dst_mask */
278 false), /* pcrel_offset */
280 HOWTO (R_SH_COUNT, /* type */
282 2, /* size (0 = byte, 1 = short, 2 = long) */
284 false, /* pc_relative */
286 complain_overflow_bitfield, /* complain_on_overflow */
287 sh_reloc, /* special_function */
288 "r_count", /* name */
289 true, /* partial_inplace */
290 0xffffffff, /* src_mask */
291 0xffffffff, /* dst_mask */
292 false), /* pcrel_offset */
294 HOWTO (R_SH_ALIGN, /* type */
296 2, /* size (0 = byte, 1 = short, 2 = long) */
298 false, /* pc_relative */
300 complain_overflow_bitfield, /* complain_on_overflow */
301 sh_reloc, /* special_function */
302 "r_align", /* name */
303 true, /* partial_inplace */
304 0xffffffff, /* src_mask */
305 0xffffffff, /* dst_mask */
306 false), /* pcrel_offset */
308 HOWTO (R_SH_CODE, /* type */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
312 false, /* pc_relative */
314 complain_overflow_bitfield, /* complain_on_overflow */
315 sh_reloc, /* special_function */
317 true, /* partial_inplace */
318 0xffffffff, /* src_mask */
319 0xffffffff, /* dst_mask */
320 false), /* pcrel_offset */
322 HOWTO (R_SH_DATA, /* type */
324 2, /* size (0 = byte, 1 = short, 2 = long) */
326 false, /* pc_relative */
328 complain_overflow_bitfield, /* complain_on_overflow */
329 sh_reloc, /* special_function */
331 true, /* partial_inplace */
332 0xffffffff, /* src_mask */
333 0xffffffff, /* dst_mask */
334 false), /* pcrel_offset */
336 HOWTO (R_SH_LABEL, /* type */
338 2, /* size (0 = byte, 1 = short, 2 = long) */
340 false, /* pc_relative */
342 complain_overflow_bitfield, /* complain_on_overflow */
343 sh_reloc, /* special_function */
344 "r_label", /* name */
345 true, /* partial_inplace */
346 0xffffffff, /* src_mask */
347 0xffffffff, /* dst_mask */
348 false), /* pcrel_offset */
350 HOWTO (R_SH_SWITCH8, /* type */
352 0, /* size (0 = byte, 1 = short, 2 = long) */
354 false, /* pc_relative */
356 complain_overflow_bitfield, /* complain_on_overflow */
357 sh_reloc, /* special_function */
358 "r_switch8", /* name */
359 true, /* partial_inplace */
362 false) /* pcrel_offset */
365 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
367 /* Check for a bad magic number. */
368 #define BADMAG(x) SHBADMAG(x)
370 /* Customize coffcode.h (this is not currently used). */
373 /* FIXME: This should not be set here. */
374 #define __A_MAGIC_SET__
377 /* Swap the r_offset field in and out. */
378 #define SWAP_IN_RELOC_OFFSET bfd_h_get_32
379 #define SWAP_OUT_RELOC_OFFSET bfd_h_put_32
381 /* Swap out extra information in the reloc structure. */
382 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
385 dst->r_stuff[0] = 'S'; \
386 dst->r_stuff[1] = 'C'; \
391 /* Get the value of a symbol, when performing a relocation. */
394 get_symbol_value (symbol)
399 if (bfd_is_com_section (symbol->section))
402 relocation = (symbol->value +
403 symbol->section->output_section->vma +
404 symbol->section->output_offset);
410 /* Convert an rtype to howto for the COFF backend linker.
411 Copied from coff-i386. */
412 #define coff_rtype_to_howto coff_sh_rtype_to_howto
414 static reloc_howto_type *
415 coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
416 bfd * abfd ATTRIBUTE_UNUSED;
418 struct internal_reloc * rel;
419 struct coff_link_hash_entry * h;
420 struct internal_syment * sym;
423 reloc_howto_type * howto;
425 howto = sh_coff_howtos + rel->r_type;
429 if (howto->pc_relative)
430 *addendp += sec->vma;
432 if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
434 /* This is a common symbol. The section contents include the
435 size (sym->n_value) as an addend. The relocate_section
436 function will be adding in the final value of the symbol. We
437 need to subtract out the current size in order to get the correct result. */
439 BFD_ASSERT (h != NULL);
442 if (howto->pc_relative)
446 /* If the symbol is defined, then the generic code is going to
447 add back the symbol value in order to cancel out an
448 adjustment it made to the addend. However, we set the addend
449 to 0 at the start of this function. We need to adjust here,
450 to avoid the adjustment the generic code will make. FIXME:
451 This is getting a bit hackish. */
452 if (sym != NULL && sym->n_scnum != 0)
453 *addendp -= sym->n_value;
456 if (rel->r_type == R_SH_IMAGEBASE)
457 *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
462 /* This structure is used to map BFD reloc codes to SH PE relocs. */
463 struct shcoff_reloc_map
465 unsigned char bfd_reloc_val;
466 unsigned char shcoff_reloc_val;
469 /* An array mapping BFD reloc codes to SH PE relocs. */
470 static const struct shcoff_reloc_map sh_reloc_map[] =
472 { BFD_RELOC_32, R_SH_IMM32CE },
473 { BFD_RELOC_RVA, R_SH_IMAGEBASE },
474 { BFD_RELOC_CTOR, R_SH_IMM32CE },
477 /* Given a BFD reloc code, return the howto structure for the
478 corresponding SH PE reloc. */
479 #define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
481 static reloc_howto_type *
482 sh_coff_reloc_type_lookup (abfd, code)
483 bfd * abfd ATTRIBUTE_UNUSED;
484 bfd_reloc_code_real_type code;
488 for (i = 0; i < sizeof (sh_reloc_map) / sizeof (struct shcoff_reloc_map); i++)
490 if (sh_reloc_map[i].bfd_reloc_val == code)
491 return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
494 fprintf (stderr, "SH Error: unknown reloc type %d\n", code);
497 #endif /* COFF_WITH_PE */
499 /* This macro is used in coffcode.h to get the howto corresponding to
500 an internal reloc. */
502 #define RTYPE2HOWTO(relent, internal) \
504 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
505 ? &sh_coff_howtos[(internal)->r_type] \
506 : (reloc_howto_type *) NULL))
508 /* This is the same as the macro in coffcode.h, except that it copies
509 r_offset into reloc_entry->addend for some relocs. */
510 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
512 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
513 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
514 coffsym = (obj_symbols (abfd) \
515 + (cache_ptr->sym_ptr_ptr - symbols)); \
517 coffsym = coff_symbol_from (abfd, ptr); \
518 if (coffsym != (coff_symbol_type *) NULL \
519 && coffsym->native->u.syment.n_scnum == 0) \
520 cache_ptr->addend = 0; \
521 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
522 && ptr->section != (asection *) NULL) \
523 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
525 cache_ptr->addend = 0; \
526 if ((reloc).r_type == R_SH_SWITCH8 \
527 || (reloc).r_type == R_SH_SWITCH16 \
528 || (reloc).r_type == R_SH_SWITCH32 \
529 || (reloc).r_type == R_SH_USES \
530 || (reloc).r_type == R_SH_COUNT \
531 || (reloc).r_type == R_SH_ALIGN) \
532 cache_ptr->addend = (reloc).r_offset; \
535 /* This is the howto function for the SH relocations. */
537 static bfd_reloc_status_type
538 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
541 arelent *reloc_entry;
544 asection *input_section;
546 char **error_message ATTRIBUTE_UNUSED;
550 unsigned short r_type;
551 bfd_vma addr = reloc_entry->address;
552 bfd_byte *hit_data = addr + (bfd_byte *) data;
554 r_type = reloc_entry->howto->type;
556 if (output_bfd != NULL)
558 /* Partial linking--do nothing. */
559 reloc_entry->address += input_section->output_offset;
563 /* Almost all relocs have to do with relaxing. If any work must be
564 done for them, it has been done in sh_relax_section. */
565 if (r_type != R_SH_IMM32
567 && r_type != R_SH_IMM32CE
568 && r_type != R_SH_IMAGEBASE
570 && (r_type != R_SH_PCDISP
571 || (symbol_in->flags & BSF_LOCAL) != 0))
574 if (symbol_in != NULL
575 && bfd_is_und_section (symbol_in->section))
576 return bfd_reloc_undefined;
578 sym_value = get_symbol_value (symbol_in);
586 insn = bfd_get_32 (abfd, hit_data);
587 insn += sym_value + reloc_entry->addend;
588 bfd_put_32 (abfd, insn, hit_data);
592 insn = bfd_get_32 (abfd, hit_data);
593 insn += (sym_value + reloc_entry->addend
594 - pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase);
595 bfd_put_32 (abfd, insn, hit_data);
599 insn = bfd_get_16 (abfd, hit_data);
600 sym_value += reloc_entry->addend;
601 sym_value -= (input_section->output_section->vma
602 + input_section->output_offset
605 sym_value += (insn & 0xfff) << 1;
608 insn = (insn & 0xf000) | (sym_value & 0xfff);
609 bfd_put_16 (abfd, insn, hit_data);
610 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
611 return bfd_reloc_overflow;
621 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
623 /* We can do relaxing. */
624 #define coff_bfd_relax_section sh_relax_section
626 /* We use the special COFF backend linker. */
627 #define coff_relocate_section sh_relocate_section
629 /* When relaxing, we need to use special code to get the relocated section contents. */
631 #define coff_bfd_get_relocated_section_contents \
632 sh_coff_get_relocated_section_contents
634 #include "coffcode.h"
636 /* This function handles relaxing on the SH.
638 Function calls on the SH look like this:
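   (Illustrative sketch; the register and label names here are arbitrary:)

       mov.l  L1,r0
       ...
       jsr    @r0
       ...
     L1:
       .long  function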
647 The compiler and assembler will cooperate to create R_SH_USES
648 relocs on the jsr instructions. The r_offset field of the
649 R_SH_USES reloc is the PC relative offset to the instruction which
650 loads the register (the r_offset field is computed as though it
651 were for a jump instruction, so the offset value is actually from four
652 bytes past the instruction). The linker can use this reloc to
653 determine just which function is being called, and thus decide
654 whether it is possible to replace the jsr with a bsr.
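   (For illustration: when the called function turns out to be within
   branch range, the jsr @r0 above can be rewritten as a direct
       bsr    function
   and, once no other R_SH_USES relocs refer to it, the mov.l and the
   constant at L1 can be deleted as well.)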
656 If multiple function calls are all based on a single register load
657 (i.e., the same function is called multiple times), the compiler
658 guarantees that each function call will have an R_SH_USES reloc.
659 Therefore, if the linker is able to convert each R_SH_USES reloc
660 which refers to that address, it can safely eliminate the register
663 When the assembler creates an R_SH_USES reloc, it examines it to
664 determine which address is being loaded (L1 in the above example).
665 It then counts the number of references to that address, and
666 creates an R_SH_COUNT reloc at that address. The r_offset field of
667 the R_SH_COUNT reloc will be the number of references. If the
668 linker is able to eliminate a register load, it can use the
669 R_SH_COUNT reloc to see whether it can also eliminate the function
672 SH relaxing also handles another, unrelated, matter. On the SH, if
673 a load or store instruction is not aligned on a four byte boundary,
674 the memory cycle interferes with the 32 bit instruction fetch,
675 causing a one cycle bubble in the pipeline. Therefore, we try to
676 align load and store instructions on four byte boundaries if we
677 can, by swapping them with one of the adjacent instructions. */
680 sh_relax_section (abfd, sec, link_info, again)
683 struct bfd_link_info *link_info;
686 struct internal_reloc *internal_relocs;
687 struct internal_reloc *free_relocs = NULL;
689 struct internal_reloc *irel, *irelend;
690 bfd_byte *contents = NULL;
691 bfd_byte *free_contents = NULL;
695 if (link_info->relocateable
696 || (sec->flags & SEC_RELOC) == 0
697 || sec->reloc_count == 0)
700 /* If this is the first time we have been called for this section,
701 initialize the cooked size. */
702 if (sec->_cooked_size == 0)
703 sec->_cooked_size = sec->_raw_size;
705 internal_relocs = (_bfd_coff_read_internal_relocs
706 (abfd, sec, link_info->keep_memory,
707 (bfd_byte *) NULL, false,
708 (struct internal_reloc *) NULL));
709 if (internal_relocs == NULL)
711 if (! link_info->keep_memory)
712 free_relocs = internal_relocs;
716 irelend = internal_relocs + sec->reloc_count;
717 for (irel = internal_relocs; irel < irelend; irel++)
719 bfd_vma laddr, paddr, symval;
721 struct internal_reloc *irelfn, *irelscan, *irelcount;
722 struct internal_syment sym;
725 if (irel->r_type == R_SH_CODE)
728 if (irel->r_type != R_SH_USES)
731 /* Get the section contents. */
732 if (contents == NULL)
734 if (coff_section_data (abfd, sec) != NULL
735 && coff_section_data (abfd, sec)->contents != NULL)
736 contents = coff_section_data (abfd, sec)->contents;
739 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
740 if (contents == NULL)
742 free_contents = contents;
744 if (! bfd_get_section_contents (abfd, sec, contents,
745 (file_ptr) 0, sec->_raw_size))
750 /* The r_offset field of the R_SH_USES reloc will point us to
751 the register load. The 4 is because the r_offset field is
752 computed as though it were a jump offset, which is based
753 from 4 bytes after the jump instruction. */
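/* (Illustrative: an R_SH_USES at section offset 0x20 with an r_offset
   of 0x10 refers to the load instruction at 0x20 + 4 + 0x10 == 0x34.) */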
754 laddr = irel->r_vaddr - sec->vma + 4;
755 /* Careful to sign extend the 32-bit offset. */
756 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
757 if (laddr >= sec->_raw_size)
759 (*_bfd_error_handler) ("%s: 0x%lx: warning: bad R_SH_USES offset",
760 bfd_get_filename (abfd),
761 (unsigned long) irel->r_vaddr);
764 insn = bfd_get_16 (abfd, contents + laddr);
766 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
767 if ((insn & 0xf000) != 0xd000)
769 ((*_bfd_error_handler)
770 ("%s: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
771 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr, insn));
775 /* Get the address from which the register is being loaded. The
776 displacement in the mov.l instruction is quadrupled. It is a
777 displacement from four bytes after the mov.l instruction, but,
778 before adding in the PC address, two least significant bits
779 of the PC are cleared. We assume that the section is aligned
780 on a four byte boundary. */
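/* Worked example (illustrative): a mov.l at section offset 0x104 with
   an 8-bit displacement of 3 loads from ((0x104 + 4) & ~3) + 3 * 4 == 0x114. */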
783 paddr += (laddr + 4) &~ 3;
784 if (paddr >= sec->_raw_size)
786 ((*_bfd_error_handler)
787 ("%s: 0x%lx: warning: bad R_SH_USES load offset",
788 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
792 /* Get the reloc for the address from which the register is
793 being loaded. This reloc will tell us which function is
794 actually being called. */
796 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
797 if (irelfn->r_vaddr == paddr
799 && (irelfn->r_type == R_SH_IMM32
800 || irelfn->r_type == R_SH_IMM32CE
801 || irelfn->r_type == R_SH_IMAGEBASE))
804 && irelfn->r_type == R_SH_IMM32)
807 if (irelfn >= irelend)
809 ((*_bfd_error_handler)
810 ("%s: 0x%lx: warning: could not find expected reloc",
811 bfd_get_filename (abfd), (unsigned long) paddr));
815 /* Get the value of the symbol referred to by the reloc. */
816 if (! _bfd_coff_get_external_symbols (abfd))
818 bfd_coff_swap_sym_in (abfd,
819 ((bfd_byte *) obj_coff_external_syms (abfd)
821 * bfd_coff_symesz (abfd))),
823 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
825 ((*_bfd_error_handler)
826 ("%s: 0x%lx: warning: symbol in unexpected section",
827 bfd_get_filename (abfd), (unsigned long) paddr));
831 if (sym.n_sclass != C_EXT)
833 symval = (sym.n_value
835 + sec->output_section->vma
836 + sec->output_offset);
840 struct coff_link_hash_entry *h;
842 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
843 BFD_ASSERT (h != NULL);
844 if (h->root.type != bfd_link_hash_defined
845 && h->root.type != bfd_link_hash_defweak)
847 /* This appears to be a reference to an undefined
848 symbol. Just ignore it--it will be caught by the
849 regular reloc processing. */
853 symval = (h->root.u.def.value
854 + h->root.u.def.section->output_section->vma
855 + h->root.u.def.section->output_offset);
858 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
860 /* See if this function call can be shortened. */
864 + sec->output_section->vma
867 if (foff < -0x1000 || foff >= 0x1000)
869 /* After all that work, we can't shorten this function call. */
873 /* Shorten the function call. */
875 /* For simplicity of coding, we are going to modify the section
876 contents, the section relocs, and the BFD symbol table. We
877 must tell the rest of the code not to free up this
878 information. It would be possible to instead create a table
879 of changes which have to be made, as is done in coff-mips.c;
880 that would be more work, but would require less memory when
881 the linker is run. */
883 if (coff_section_data (abfd, sec) == NULL)
886 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
887 if (sec->used_by_bfd == NULL)
891 coff_section_data (abfd, sec)->relocs = internal_relocs;
892 coff_section_data (abfd, sec)->keep_relocs = true;
895 coff_section_data (abfd, sec)->contents = contents;
896 coff_section_data (abfd, sec)->keep_contents = true;
897 free_contents = NULL;
899 obj_coff_keep_syms (abfd) = true;
901 /* Replace the jsr with a bsr. */
903 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
904 replace the jsr with a bsr. */
905 irel->r_type = R_SH_PCDISP;
906 irel->r_symndx = irelfn->r_symndx;
907 if (sym.n_sclass != C_EXT)
909 /* If this needs to be changed because of future relaxing,
910 it will be handled here like other internal PCDISP
913 0xb000 | ((foff >> 1) & 0xfff),
914 contents + irel->r_vaddr - sec->vma);
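/* (0xb000 is the bsr opcode; its low twelve bits hold the branch
   displacement in units of two bytes, hence foff >> 1 above.) */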
918 /* We can't fully resolve this yet, because the external
919 symbol value may be changed by future relaxing. We let
920 the final link phase handle it. */
921 bfd_put_16 (abfd, 0xb000, contents + irel->r_vaddr - sec->vma);
924 /* See if there is another R_SH_USES reloc referring to the same register load. */
926 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
927 if (irelscan->r_type == R_SH_USES
928 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
930 if (irelscan < irelend)
932 /* Some other function call depends upon this register load,
933 and we have not yet converted that function call.
934 Indeed, we may never be able to convert it. There is
935 nothing else we can do at this point. */
939 /* Look for a R_SH_COUNT reloc on the location where the
940 function address is stored. Do this before deleting any
941 bytes, to avoid confusion about the address. */
942 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
943 if (irelcount->r_vaddr == paddr
944 && irelcount->r_type == R_SH_COUNT)
947 /* Delete the register load. */
948 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
951 /* That will change things, so, just in case it permits some
952 other function call to come within range, we should relax
953 again. Note that this is not required, and it may be slow. */
956 /* Now check whether we got a COUNT reloc. */
957 if (irelcount >= irelend)
959 ((*_bfd_error_handler)
960 ("%s: 0x%lx: warning: could not find expected COUNT reloc",
961 bfd_get_filename (abfd), (unsigned long) paddr));
965 /* The number of uses is stored in the r_offset field. We've just deleted one of them. */
967 if (irelcount->r_offset == 0)
969 ((*_bfd_error_handler) ("%s: 0x%lx: warning: bad count",
970 bfd_get_filename (abfd),
971 (unsigned long) paddr));
975 --irelcount->r_offset;
977 /* If there are no more uses, we can delete the address. Reload
978 the address from irelfn, in case it was changed by the
979 previous call to sh_relax_delete_bytes. */
980 if (irelcount->r_offset == 0)
982 if (! sh_relax_delete_bytes (abfd, sec,
983 irelfn->r_vaddr - sec->vma, 4))
987 /* We've done all we can with that function call. */
990 /* Look for load and store instructions that we can align on four
996 /* Get the section contents. */
997 if (contents == NULL)
999 if (coff_section_data (abfd, sec) != NULL
1000 && coff_section_data (abfd, sec)->contents != NULL)
1001 contents = coff_section_data (abfd, sec)->contents;
1004 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
1005 if (contents == NULL)
1007 free_contents = contents;
1009 if (! bfd_get_section_contents (abfd, sec, contents,
1010 (file_ptr) 0, sec->_raw_size))
1015 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1020 if (coff_section_data (abfd, sec) == NULL)
1023 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
1024 if (sec->used_by_bfd == NULL)
1028 coff_section_data (abfd, sec)->relocs = internal_relocs;
1029 coff_section_data (abfd, sec)->keep_relocs = true;
1032 coff_section_data (abfd, sec)->contents = contents;
1033 coff_section_data (abfd, sec)->keep_contents = true;
1034 free_contents = NULL;
1036 obj_coff_keep_syms (abfd) = true;
1040 if (free_relocs != NULL)
1046 if (free_contents != NULL)
1048 if (! link_info->keep_memory)
1049 free (free_contents);
1052 /* Cache the section contents for coff_link_input_bfd. */
1053 if (coff_section_data (abfd, sec) == NULL)
1056 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
1057 if (sec->used_by_bfd == NULL)
1059 coff_section_data (abfd, sec)->relocs = NULL;
1061 coff_section_data (abfd, sec)->contents = contents;
1068 if (free_relocs != NULL)
1070 if (free_contents != NULL)
1071 free (free_contents);
1075 /* Delete some bytes from a section while relaxing. */
1078 sh_relax_delete_bytes (abfd, sec, addr, count)
1085 struct internal_reloc *irel, *irelend;
1086 struct internal_reloc *irelalign;
1088 bfd_byte *esym, *esymend;
1089 bfd_size_type symesz;
1090 struct coff_link_hash_entry **sym_hash;
1093 contents = coff_section_data (abfd, sec)->contents;
1095 /* The deletion must stop at the next ALIGN reloc for an alignment
1096 power larger than the number of bytes we are deleting. */
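/* (For example, when deleting two bytes the deletion stops at the first
   R_SH_ALIGN whose r_offset is 2 or more, i.e. an alignment of at least
   four bytes; illustrative only.) */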
1099 toaddr = sec->_cooked_size;
1101 irel = coff_section_data (abfd, sec)->relocs;
1102 irelend = irel + sec->reloc_count;
1103 for (; irel < irelend; irel++)
1105 if (irel->r_type == R_SH_ALIGN
1106 && irel->r_vaddr - sec->vma > addr
1107 && count < (1 << irel->r_offset))
1110 toaddr = irel->r_vaddr - sec->vma;
1115 /* Actually delete the bytes. */
1116 memmove (contents + addr, contents + addr + count, toaddr - addr - count);
1117 if (irelalign == NULL)
1118 sec->_cooked_size -= count;
1123 #define NOP_OPCODE (0x0009)
1125 BFD_ASSERT ((count & 1) == 0);
1126 for (i = 0; i < count; i += 2)
1127 bfd_put_16 (abfd, NOP_OPCODE, contents + toaddr - count + i);
1130 /* Adjust all the relocs. */
1131 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1133 bfd_vma nraddr, stop;
1136 struct internal_syment sym;
1137 int off, adjust, oinsn;
1138 bfd_signed_vma voff = 0;
1141 /* Get the new reloc address. */
1142 nraddr = irel->r_vaddr - sec->vma;
1143 if ((irel->r_vaddr - sec->vma > addr
1144 && irel->r_vaddr - sec->vma < toaddr)
1145 || (irel->r_type == R_SH_ALIGN
1146 && irel->r_vaddr - sec->vma == toaddr))
1149 /* See if this reloc was for the bytes we have deleted, in which
1150 case we no longer care about it. Don't delete relocs which
1151 represent addresses, though. */
1152 if (irel->r_vaddr - sec->vma >= addr
1153 && irel->r_vaddr - sec->vma < addr + count
1154 && irel->r_type != R_SH_ALIGN
1155 && irel->r_type != R_SH_CODE
1156 && irel->r_type != R_SH_DATA
1157 && irel->r_type != R_SH_LABEL)
1158 irel->r_type = R_SH_UNUSED;
1160 /* If this is a PC relative reloc, see if the range it covers
1161 includes the bytes we have deleted. */
1162 switch (irel->r_type)
1167 case R_SH_PCDISP8BY2:
1169 case R_SH_PCRELIMM8BY2:
1170 case R_SH_PCRELIMM8BY4:
1171 start = irel->r_vaddr - sec->vma;
1172 insn = bfd_get_16 (abfd, contents + nraddr);
1176 switch (irel->r_type)
1179 start = stop = addr;
1185 case R_SH_IMAGEBASE:
1187 /* If this reloc is against a symbol defined in this
1188 section, and the symbol will not be adjusted below, we
1189 must check the addend to see whether it will put the value in
1190 range to be adjusted, and hence must be changed. */
1191 bfd_coff_swap_sym_in (abfd,
1192 ((bfd_byte *) obj_coff_external_syms (abfd)
1194 * bfd_coff_symesz (abfd))),
1196 if (sym.n_sclass != C_EXT
1197 && sym.n_scnum == sec->target_index
1198 && ((bfd_vma) sym.n_value <= addr
1199 || (bfd_vma) sym.n_value >= toaddr))
1203 val = bfd_get_32 (abfd, contents + nraddr);
1205 if (val > addr && val < toaddr)
1206 bfd_put_32 (abfd, val - count, contents + nraddr);
1208 start = stop = addr;
1211 case R_SH_PCDISP8BY2:
1215 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1219 bfd_coff_swap_sym_in (abfd,
1220 ((bfd_byte *) obj_coff_external_syms (abfd)
1222 * bfd_coff_symesz (abfd))),
1224 if (sym.n_sclass == C_EXT)
1225 start = stop = addr;
1231 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1235 case R_SH_PCRELIMM8BY2:
1237 stop = start + 4 + off * 2;
1240 case R_SH_PCRELIMM8BY4:
1242 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1248 /* These reloc types represent
.word L2-L1
1250 The r_offset field holds the difference between the reloc
1251 address and L1. That is the start of the reloc, and
1252 adding in the contents gives us the top. We must adjust
1253 both the r_offset field and the section contents. */
1255 start = irel->r_vaddr - sec->vma;
1256 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1260 && (stop <= addr || stop >= toaddr))
1261 irel->r_offset += count;
1262 else if (stop > addr
1264 && (start <= addr || start >= toaddr))
1265 irel->r_offset -= count;
1269 if (irel->r_type == R_SH_SWITCH16)
1270 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1271 else if (irel->r_type == R_SH_SWITCH8)
1272 voff = bfd_get_8 (abfd, contents + nraddr);
1274 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1275 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1280 start = irel->r_vaddr - sec->vma;
1281 stop = (bfd_vma) ((bfd_signed_vma) start
1282 + (long) irel->r_offset
1289 && (stop <= addr || stop >= toaddr))
1291 else if (stop > addr
1293 && (start <= addr || start >= toaddr))
1302 switch (irel->r_type)
1308 case R_SH_PCDISP8BY2:
1309 case R_SH_PCRELIMM8BY2:
1311 if ((oinsn & 0xff00) != (insn & 0xff00))
1313 bfd_put_16 (abfd, insn, contents + nraddr);
1318 if ((oinsn & 0xf000) != (insn & 0xf000))
1320 bfd_put_16 (abfd, insn, contents + nraddr);
1323 case R_SH_PCRELIMM8BY4:
1324 BFD_ASSERT (adjust == count || count >= 4);
1329 if ((irel->r_vaddr & 3) == 0)
1332 if ((oinsn & 0xff00) != (insn & 0xff00))
1334 bfd_put_16 (abfd, insn, contents + nraddr);
1339 if (voff < 0 || voff >= 0xff)
1341 bfd_put_8 (abfd, voff, contents + nraddr);
1346 if (voff < - 0x8000 || voff >= 0x8000)
1348 bfd_put_signed_16 (abfd, voff, contents + nraddr);
1353 bfd_put_signed_32 (abfd, voff, contents + nraddr);
1357 irel->r_offset += adjust;
1363 ((*_bfd_error_handler)
1364 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
1365 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
1366 bfd_set_error (bfd_error_bad_value);
1371 irel->r_vaddr = nraddr + sec->vma;
1374 /* Look through all the other sections. If they contain any IMM32
1375 relocs against internal symbols which we are not going to adjust
1376 below, we may need to adjust the addends. */
1377 for (o = abfd->sections; o != NULL; o = o->next)
1379 struct internal_reloc *internal_relocs;
1380 struct internal_reloc *irelscan, *irelscanend;
1381 bfd_byte *ocontents;
1384 || (o->flags & SEC_RELOC) == 0
1385 || o->reloc_count == 0)
1388 /* We always cache the relocs. Perhaps, if info->keep_memory is
1389 false, we should free them, if we are permitted to, when we
1390 leave sh_coff_relax_section. */
1391 internal_relocs = (_bfd_coff_read_internal_relocs
1392 (abfd, o, true, (bfd_byte *) NULL, false,
1393 (struct internal_reloc *) NULL));
1394 if (internal_relocs == NULL)
1398 irelscanend = internal_relocs + o->reloc_count;
1399 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1401 struct internal_syment sym;
1404 if (irelscan->r_type != R_SH_IMM32
1405 && irelscan->r_type != R_SH_IMAGEBASE
1406 && irelscan->r_type != R_SH_IMM32CE)
1408 if (irelscan->r_type != R_SH_IMM32)
1412 bfd_coff_swap_sym_in (abfd,
1413 ((bfd_byte *) obj_coff_external_syms (abfd)
1414 + (irelscan->r_symndx
1415 * bfd_coff_symesz (abfd))),
1417 if (sym.n_sclass != C_EXT
1418 && sym.n_scnum == sec->target_index
1419 && ((bfd_vma) sym.n_value <= addr
1420 || (bfd_vma) sym.n_value >= toaddr))
1424 if (ocontents == NULL)
1426 if (coff_section_data (abfd, o)->contents != NULL)
1427 ocontents = coff_section_data (abfd, o)->contents;
1430 /* We always cache the section contents.
1431 Perhaps, if info->keep_memory is false, we
1432 should free them, if we are permitted to,
1433 when we leave sh_coff_relax_section. */
1434 ocontents = (bfd_byte *) bfd_malloc (o->_raw_size);
1435 if (ocontents == NULL)
1437 if (! bfd_get_section_contents (abfd, o, ocontents,
1441 coff_section_data (abfd, o)->contents = ocontents;
1445 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1447 if (val > addr && val < toaddr)
1448 bfd_put_32 (abfd, val - count,
1449 ocontents + irelscan->r_vaddr - o->vma);
1451 coff_section_data (abfd, o)->keep_contents = true;
1456 /* Adjusting the internal symbols will not work if something has
1457 already retrieved the generic symbols. It would be possible to
1458 make this work by adjusting the generic symbols at the same time.
1459 However, this case should not arise in normal usage. */
1460 if (obj_symbols (abfd) != NULL
1461 || obj_raw_syments (abfd) != NULL)
1463 ((*_bfd_error_handler)
1464 ("%s: fatal: generic symbols retrieved before relaxing",
1465 bfd_get_filename (abfd)));
1466 bfd_set_error (bfd_error_invalid_operation);
1470 /* Adjust all the symbols. */
1471 sym_hash = obj_coff_sym_hashes (abfd);
1472 symesz = bfd_coff_symesz (abfd);
1473 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1474 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1475 while (esym < esymend)
1477 struct internal_syment isym;
1479 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1481 if (isym.n_scnum == sec->target_index
1482 && (bfd_vma) isym.n_value > addr
1483 && (bfd_vma) isym.n_value < toaddr)
1485 isym.n_value -= count;
1487 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1489 if (*sym_hash != NULL)
1491 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1492 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1493 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1494 && (*sym_hash)->root.u.def.value < toaddr);
1495 (*sym_hash)->root.u.def.value -= count;
1499 esym += (isym.n_numaux + 1) * symesz;
1500 sym_hash += isym.n_numaux + 1;
1503 /* See if we can move the ALIGN reloc forward. We have adjusted
1504 r_vaddr for it already. */
1505 if (irelalign != NULL)
1507 bfd_vma alignto, alignaddr;
1509 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1510 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1511 1 << irelalign->r_offset);
1512 if (alignto != alignaddr)
1514 /* Tail recursion. */
1515 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1516 alignto - alignaddr);
1523 /* This is yet another version of the SH opcode table, used to rapidly
1524 get information about a particular instruction. */
1526 /* The opcode map is represented by an array of these structures. The
1527 array is indexed by the high order four bits in the instruction. */
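/* For example (illustrative), insn 0x6103 (mov r0,r1) selects
   sh_opcodes[6]; masking with 0xf00f gives 0x6003, which matches the
   "mov rm,rn" entry in sh_opcode60 below. */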
1529 struct sh_major_opcode
1531 /* A pointer to the instruction list. This is an array which
1532 contains all the instructions with this major opcode. */
1533 const struct sh_minor_opcode *minor_opcodes;
1534 /* The number of elements in minor_opcodes. */
1535 unsigned short count;
1538 /* This structure holds information for a set of SH opcodes. The
1539 instruction code is anded with the mask value, and the resulting
1540 value is used to search the sorted opcode list. */
1542 struct sh_minor_opcode
1544 /* The sorted opcode list. */
1545 const struct sh_opcode *opcodes;
1546 /* The number of elements in opcodes. */
1547 unsigned short count;
1548 /* The mask value to use when searching the opcode list. */
1549 unsigned short mask;
1552 /* This structure holds information for an SH instruction. An array
1553 of these structures is sorted in order by opcode. */
1557 /* The code for this instruction, after it has been anded with the
1558 mask value in the sh_major_opcode structure. */
1559 unsigned short opcode;
1560 /* Flags for this instruction. */
1561 unsigned long flags;
1564 /* Flags which appear in the sh_opcode structure. */
1566 /* This instruction loads a value from memory. */
1569 /* This instruction stores a value to memory. */
1572 /* This instruction is a branch. */
1573 #define BRANCH (0x4)
1575 /* This instruction has a delay slot. */
1578 /* This instruction uses the value in the register in the field at
1579 mask 0x0f00 of the instruction. */
1580 #define USES1 (0x10)
1581 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1583 /* This instruction uses the value in the register in the field at
1584 mask 0x00f0 of the instruction. */
1585 #define USES2 (0x20)
1586 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1588 /* This instruction uses the value in register 0. */
1589 #define USESR0 (0x40)
1591 /* This instruction sets the value in the register in the field at
1592 mask 0x0f00 of the instruction. */
1593 #define SETS1 (0x80)
1594 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1596 /* This instruction sets the value in the register in the field at
1597 mask 0x00f0 of the instruction. */
1598 #define SETS2 (0x100)
1599 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1601 /* This instruction sets register 0. */
1602 #define SETSR0 (0x200)
1604 /* This instruction sets a special register. */
1605 #define SETSSP (0x400)
1607 /* This instruction uses a special register. */
1608 #define USESSP (0x800)
1610 /* This instruction uses the floating point register in the field at
1611 mask 0x0f00 of the instruction. */
1612 #define USESF1 (0x1000)
1613 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1615 /* This instruction uses the floating point register in the field at
1616 mask 0x00f0 of the instruction. */
1617 #define USESF2 (0x2000)
1618 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1620 /* This instruction uses floating point register 0. */
1621 #define USESF0 (0x4000)
1623 /* This instruction sets the floating point register in the field at
1624 mask 0x0f00 of the instruction. */
1625 #define SETSF1 (0x8000)
1626 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1628 #define USESAS (0x10000)
1629 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1630 #define USESR8 (0x20000)
1631 #define SETSAS (0x40000)
1632 #define SETSAS_REG(x) USESAS_REG (x)
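/* As an illustrative example, "add rm,rn" (0x300c in the tables below)
   is flagged SETS1 | USES1 | USES2: it reads both register fields and
   writes the register in the 0x0f00 field. */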
1634 #ifndef COFF_IMAGE_WITH_PE
1635 static boolean sh_insn_uses_reg
1636 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1637 static boolean sh_insn_sets_reg
1638 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1639 static boolean sh_insn_uses_or_sets_reg
1640 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1641 static boolean sh_insn_uses_freg
1642 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1643 static boolean sh_insn_sets_freg
1644 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1645 static boolean sh_insn_uses_or_sets_freg
1646 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1647 static boolean sh_insns_conflict
1648 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1649 const struct sh_opcode *));
1650 static boolean sh_load_use
1651 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1652 const struct sh_opcode *));
1654 /* The opcode maps. */
1656 #define MAP(a) a, sizeof a / sizeof a[0]
1658 static const struct sh_opcode sh_opcode00[] =
1660 { 0x0008, SETSSP }, /* clrt */
1661 { 0x0009, 0 }, /* nop */
1662 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1663 { 0x0018, SETSSP }, /* sett */
1664 { 0x0019, SETSSP }, /* div0u */
1665 { 0x001b, 0 }, /* sleep */
1666 { 0x0028, SETSSP }, /* clrmac */
1667 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1668 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1669 { 0x0048, SETSSP }, /* clrs */
1670 { 0x0058, SETSSP } /* sets */
1673 static const struct sh_opcode sh_opcode01[] =
1675 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1676 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1677 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1678 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1679 { 0x0029, SETS1 | USESSP }, /* movt rn */
1680 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1681 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1682 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn / sts dsr,rn */
1683 { 0x0083, LOAD | USES1 }, /* pref @rn */
1684 { 0x007a, SETS1 | USESSP }, /* sts a0,rn */
1685 { 0x008a, SETS1 | USESSP }, /* sts x0,rn */
1686 { 0x009a, SETS1 | USESSP }, /* sts x1,rn */
1687 { 0x00aa, SETS1 | USESSP }, /* sts y0,rn */
1688 { 0x00ba, SETS1 | USESSP } /* sts y1,rn */
1691 /* These sixteen instructions can be handled with one table entry below. */
1693 { 0x0002, SETS1 | USESSP }, /* stc sr,rn */
1694 { 0x0012, SETS1 | USESSP }, /* stc gbr,rn */
1695 { 0x0022, SETS1 | USESSP }, /* stc vbr,rn */
1696 { 0x0032, SETS1 | USESSP }, /* stc ssr,rn */
1697 { 0x0042, SETS1 | USESSP }, /* stc spc,rn */
1698 { 0x0052, SETS1 | USESSP }, /* stc mod,rn */
1699 { 0x0062, SETS1 | USESSP }, /* stc rs,rn */
1700 { 0x0072, SETS1 | USESSP }, /* stc re,rn */
1701 { 0x0082, SETS1 | USESSP }, /* stc r0_bank,rn */
1702 { 0x0092, SETS1 | USESSP }, /* stc r1_bank,rn */
1703 { 0x00a2, SETS1 | USESSP }, /* stc r2_bank,rn */
1704 { 0x00b2, SETS1 | USESSP }, /* stc r3_bank,rn */
1705 { 0x00c2, SETS1 | USESSP }, /* stc r4_bank,rn */
1706 { 0x00d2, SETS1 | USESSP }, /* stc r5_bank,rn */
1707 { 0x00e2, SETS1 | USESSP }, /* stc r6_bank,rn */
1708 { 0x00f2, SETS1 | USESSP } /* stc r7_bank,rn */
1711 static const struct sh_opcode sh_opcode02[] =
1713 { 0x0002, SETS1 | USESSP }, /* stc <special_reg>,rn */
1714 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1715 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1716 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1717 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1718 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1719 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1720 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1721 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1724 static const struct sh_minor_opcode sh_opcode0[] =
1726 { MAP (sh_opcode00), 0xffff },
1727 { MAP (sh_opcode01), 0xf0ff },
1728 { MAP (sh_opcode02), 0xf00f }
1731 static const struct sh_opcode sh_opcode10[] =
1733 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1736 static const struct sh_minor_opcode sh_opcode1[] =
1738 { MAP (sh_opcode10), 0xf000 }
1741 static const struct sh_opcode sh_opcode20[] =
1743 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1744 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1745 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1746 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1747 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1748 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1749 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1750 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1751 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1752 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1753 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1754 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1755 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1756 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1757 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1760 static const struct sh_minor_opcode sh_opcode2[] =
1762 { MAP (sh_opcode20), 0xf00f }
1765 static const struct sh_opcode sh_opcode30[] =
1767 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1768 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1769 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1770 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1771 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1772 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1773 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1774 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1775 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1776 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1777 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1778 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1779 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1780 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1783 static const struct sh_minor_opcode sh_opcode3[] =
1785 { MAP (sh_opcode30), 0xf00f }
1788 static const struct sh_opcode sh_opcode40[] =
1790 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1791 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1792 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1793 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1794 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1795 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1796 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1797 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1798 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1799 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1800 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1801 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1802 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1803 { 0x4014, SETSSP | USES1 }, /* setrc rm */
1804 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1805 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1806 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1807 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1808 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1809 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1810 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1811 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1812 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1813 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1814 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1815 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1816 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1817 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1818 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1819 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1820 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1821 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1822 { 0x405a, SETSSP | USES1 }, /* lds rm,fpul */
1823 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr / dsr,@-rn */
1824 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr / dsr */
1825 { 0x406a, SETSSP | USES1 }, /* lds rm,fpscr / lds rm,dsr */
1826 { 0x4072, STORE | SETS1 | USES1 | USESSP }, /* sts.l a0,@-rn */
1827 { 0x4076, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,a0 */
1828 { 0x407a, SETSSP | USES1 }, /* lds rm,a0 */
1829 { 0x4082, STORE | SETS1 | USES1 | USESSP }, /* sts.l x0,@-rn */
1830 { 0x4086, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x0 */
1831 { 0x408a, SETSSP | USES1 }, /* lds rm,x0 */
1832 { 0x4092, STORE | SETS1 | USES1 | USESSP }, /* sts.l x1,@-rn */
1833 { 0x4096, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x1 */
1834 { 0x409a, SETSSP | USES1 }, /* lds rm,x1 */
1835 { 0x40a2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y0,@-rn */
1836 { 0x40a6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y0 */
1837 { 0x40aa, SETSSP | USES1 }, /* lds rm,y0 */
1838 { 0x40b2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y1,@-rn */
1839 { 0x40b6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y1 */
1840 { 0x40ba, SETSSP | USES1 } /* lds rm,y1 */
1841 #if 0 /* These groups of sixteen insns can be
1842 handled with one table entry each below. */
1843 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l sr,@-rn */
1844 { 0x4013, STORE | SETS1 | USES1 | USESSP }, /* stc.l gbr,@-rn */
1845 { 0x4023, STORE | SETS1 | USES1 | USESSP }, /* stc.l vbr,@-rn */
1846 { 0x4033, STORE | SETS1 | USES1 | USESSP }, /* stc.l ssr,@-rn */
1847 { 0x4043, STORE | SETS1 | USES1 | USESSP }, /* stc.l spc,@-rn */
1848 { 0x4053, STORE | SETS1 | USES1 | USESSP }, /* stc.l mod,@-rn */
1849 { 0x4063, STORE | SETS1 | USES1 | USESSP }, /* stc.l rs,@-rn */
1850 { 0x4073, STORE | SETS1 | USES1 | USESSP }, /* stc.l re,@-rn */
1851 { 0x4083, STORE | SETS1 | USES1 | USESSP }, /* stc.l r0_bank,@-rn */
1853 { 0x40f3, STORE | SETS1 | USES1 | USESSP }, /* stc.l r7_bank,@-rn */
1855 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,sr */
1856 { 0x4017, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,gbr */
1857 { 0x4027, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,vbr */
1858 { 0x4037, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,ssr */
1859 { 0x4047, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,spc */
1860 { 0x4057, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,mod */
1861 { 0x4067, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,rs */
1862 { 0x4077, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,re */
1863 { 0x4087, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,r0_bank */
1865 { 0x40f7, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,r7_bank */
1867 { 0x400e, SETSSP | USES1 }, /* ldc rm,sr */
1868 { 0x401e, SETSSP | USES1 }, /* ldc rm,gbr */
1869 { 0x402e, SETSSP | USES1 }, /* ldc rm,vbr */
1870 { 0x403e, SETSSP | USES1 }, /* ldc rm,ssr */
1871 { 0x404e, SETSSP | USES1 }, /* ldc rm,spc */
1872 { 0x405e, SETSSP | USES1 }, /* ldc rm,mod */
1873 { 0x406e, SETSSP | USES1 }, /* ldc rm,rs */
1874 { 0x407e, SETSSP | USES1 } /* ldc rm,re */
1875 { 0x408e, SETSSP | USES1 } /* ldc rm,r0_bank */
1877 { 0x40fe, SETSSP | USES1 } /* ldc rm,r7_bank */
1881 static const struct sh_opcode sh_opcode41[] =
1883 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l <special_reg>,@-rn */
1884 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,<special_reg> */
1885 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1886 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1887 { 0x400e, SETSSP | USES1 }, /* ldc rm,<special_reg> */
1888 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1891 static const struct sh_minor_opcode sh_opcode4[] =
1893 { MAP (sh_opcode40), 0xf0ff },
1894 { MAP (sh_opcode41), 0xf00f }
1897 static const struct sh_opcode sh_opcode50[] =
1899 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1902 static const struct sh_minor_opcode sh_opcode5[] =
1904 { MAP (sh_opcode50), 0xf000 }
1907 static const struct sh_opcode sh_opcode60[] =
1909 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1910 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1911 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1912 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1913 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1914 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1915 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1916 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1917 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1918 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1919 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1920 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1921 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1922 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1923 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1924 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1927 static const struct sh_minor_opcode sh_opcode6[] =
1929 { MAP (sh_opcode60), 0xf00f }
1932 static const struct sh_opcode sh_opcode70[] =
1934 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1937 static const struct sh_minor_opcode sh_opcode7[] =
1939 { MAP (sh_opcode70), 0xf000 }
1942 static const struct sh_opcode sh_opcode80[] =
1944 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1945 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1946 { 0x8200, SETSSP }, /* setrc #imm */
1947 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1948 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rm),r0 */
1949 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1950 { 0x8900, BRANCH | USESSP }, /* bt label */
1951 { 0x8b00, BRANCH | USESSP }, /* bf label */
1952 { 0x8c00, SETSSP }, /* ldrs @(disp,pc) */
1953 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1954 { 0x8e00, SETSSP }, /* ldre @(disp,pc) */
1955 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1958 static const struct sh_minor_opcode sh_opcode8[] =
1960 { MAP (sh_opcode80), 0xff00 }
1963 static const struct sh_opcode sh_opcode90[] =
1965 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1968 static const struct sh_minor_opcode sh_opcode9[] =
1970 { MAP (sh_opcode90), 0xf000 }
static const struct sh_opcode sh_opcodea0[] =
{
  { 0xa000, BRANCH | DELAY }		/* bra label */
};

static const struct sh_minor_opcode sh_opcodea[] =
{
  { MAP (sh_opcodea0), 0xf000 }
};

static const struct sh_opcode sh_opcodeb0[] =
{
  { 0xb000, BRANCH | DELAY }		/* bsr label */
};

static const struct sh_minor_opcode sh_opcodeb[] =
{
  { MAP (sh_opcodeb0), 0xf000 }
};

static const struct sh_opcode sh_opcodec0[] =
{
  { 0xc000, STORE | USESR0 | USESSP },		/* mov.b r0,@(disp,gbr) */
  { 0xc100, STORE | USESR0 | USESSP },		/* mov.w r0,@(disp,gbr) */
  { 0xc200, STORE | USESR0 | USESSP },		/* mov.l r0,@(disp,gbr) */
  { 0xc300, BRANCH | USESSP },			/* trapa #imm */
  { 0xc400, LOAD | SETSR0 | USESSP },		/* mov.b @(disp,gbr),r0 */
  { 0xc500, LOAD | SETSR0 | USESSP },		/* mov.w @(disp,gbr),r0 */
  { 0xc600, LOAD | SETSR0 | USESSP },		/* mov.l @(disp,gbr),r0 */
  { 0xc700, SETSR0 },				/* mova @(disp,pc),r0 */
  { 0xc800, SETSSP | USESR0 },			/* tst #imm,r0 */
  { 0xc900, SETSR0 | USESR0 },			/* and #imm,r0 */
  { 0xca00, SETSR0 | USESR0 },			/* xor #imm,r0 */
  { 0xcb00, SETSR0 | USESR0 },			/* or #imm,r0 */
  { 0xcc00, LOAD | SETSSP | USESR0 | USESSP },	/* tst.b #imm,@(r0,gbr) */
  { 0xcd00, LOAD | STORE | USESR0 | USESSP },	/* and.b #imm,@(r0,gbr) */
  { 0xce00, LOAD | STORE | USESR0 | USESSP },	/* xor.b #imm,@(r0,gbr) */
  { 0xcf00, LOAD | STORE | USESR0 | USESSP }	/* or.b #imm,@(r0,gbr) */
};

static const struct sh_minor_opcode sh_opcodec[] =
{
  { MAP (sh_opcodec0), 0xff00 }
};

static const struct sh_opcode sh_opcoded0[] =
{
  { 0xd000, LOAD | SETS1 }		/* mov.l @(disp,pc),rn */
};

static const struct sh_minor_opcode sh_opcoded[] =
{
  { MAP (sh_opcoded0), 0xf000 }
};

static const struct sh_opcode sh_opcodee0[] =
{
  { 0xe000, SETS1 }			/* mov #imm,rn */
};

static const struct sh_minor_opcode sh_opcodee[] =
{
  { MAP (sh_opcodee0), 0xf000 }
};
static const struct sh_opcode sh_opcodef0[] =
{
  { 0xf000, SETSF1 | USESF1 | USESF2 },		/* fadd fm,fn */
  { 0xf001, SETSF1 | USESF1 | USESF2 },		/* fsub fm,fn */
  { 0xf002, SETSF1 | USESF1 | USESF2 },		/* fmul fm,fn */
  { 0xf003, SETSF1 | USESF1 | USESF2 },		/* fdiv fm,fn */
  { 0xf004, SETSSP | USESF1 | USESF2 },		/* fcmp/eq fm,fn */
  { 0xf005, SETSSP | USESF1 | USESF2 },		/* fcmp/gt fm,fn */
  { 0xf006, LOAD | SETSF1 | USES2 | USESR0 },	/* fmov.s @(r0,rm),fn */
  { 0xf007, STORE | USES1 | USESF2 | USESR0 },	/* fmov.s fm,@(r0,rn) */
  { 0xf008, LOAD | SETSF1 | USES2 },		/* fmov.s @rm,fn */
  { 0xf009, LOAD | SETS2 | SETSF1 | USES2 },	/* fmov.s @rm+,fn */
  { 0xf00a, STORE | USES1 | USESF2 },		/* fmov.s fm,@rn */
  { 0xf00b, STORE | SETS1 | USES1 | USESF2 },	/* fmov.s fm,@-rn */
  { 0xf00c, SETSF1 | USESF2 },			/* fmov fm,fn */
  { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 }	/* fmac f0,fm,fn */
};

static const struct sh_opcode sh_opcodef1[] =
{
  { 0xf00d, SETSF1 | USESSP },		/* fsts fpul,fn */
  { 0xf01d, SETSSP | USESF1 },		/* flds fn,fpul */
  { 0xf02d, SETSF1 | USESSP },		/* float fpul,fn */
  { 0xf03d, SETSSP | USESF1 },		/* ftrc fn,fpul */
  { 0xf04d, SETSF1 | USESF1 },		/* fneg fn */
  { 0xf05d, SETSF1 | USESF1 },		/* fabs fn */
  { 0xf06d, SETSF1 | USESF1 },		/* fsqrt fn */
  { 0xf07d, SETSSP | USESF1 },		/* ftst/nan fn */
  { 0xf08d, SETSF1 },			/* fldi0 fn */
  { 0xf09d, SETSF1 }			/* fldi1 fn */
};

static const struct sh_minor_opcode sh_opcodef[] =
{
  { MAP (sh_opcodef0), 0xf00f },
  { MAP (sh_opcodef1), 0xf0ff }
};
static struct sh_major_opcode sh_opcodes[] =
{
  { MAP (sh_opcode0) },
  { MAP (sh_opcode1) },
  { MAP (sh_opcode2) },
  { MAP (sh_opcode3) },
  { MAP (sh_opcode4) },
  { MAP (sh_opcode5) },
  { MAP (sh_opcode6) },
  { MAP (sh_opcode7) },
  { MAP (sh_opcode8) },
  { MAP (sh_opcode9) },
  { MAP (sh_opcodea) },
  { MAP (sh_opcodeb) },
  { MAP (sh_opcodec) },
  { MAP (sh_opcoded) },
  { MAP (sh_opcodee) },
  { MAP (sh_opcodef) }
};
/* The double data transfer / parallel processing insns are not
   described here.  This will cause sh_align_load_span to leave them alone.  */

static const struct sh_opcode sh_dsp_opcodef0[] =
{
  { 0xf400, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @-as,ds */
  { 0xf401, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@-as */
  { 0xf404, USESAS | LOAD | SETSSP },		/* movs.x @as,ds */
  { 0xf405, USESAS | STORE | USESSP },		/* movs.x ds,@as */
  { 0xf408, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @as+,ds */
  { 0xf409, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@as+ */
  { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 },	/* movs.x @as+r8,ds */
  { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 }	/* movs.x ds,@as+r8 */
};

static const struct sh_minor_opcode sh_dsp_opcodef[] =
{
  { MAP (sh_dsp_opcodef0), 0xfc0d }
};
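
/* A worked example of how these tables are consulted (illustrative
   only): for the insn 0x6162 (mov.l @r6,r1), the major opcode 0x6
   selects sh_opcodes[6], whose single minor table sh_opcode60 has the
   mask 0xf00f; 0x6162 & 0xf00f == 0x6002, which matches the entry
   flagged LOAD | SETS1 | USES2, i.e. a load which sets rn and uses
   rm.  */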
#ifndef COFF_IMAGE_WITH_PE

/* Given an instruction, return a pointer to the corresponding
   sh_opcode structure.  Return NULL if the instruction is not
   recognized.  */

static const struct sh_opcode *
sh_insn_info (insn)
     unsigned int insn;
{
  const struct sh_major_opcode *maj;
  const struct sh_minor_opcode *min, *minend;

  maj = &sh_opcodes[(insn & 0xf000) >> 12];
  min = maj->minor_opcodes;
  minend = min + maj->count;
  for (; min < minend; min++)
    {
      unsigned int l;
      const struct sh_opcode *op, *opend;

      l = insn & min->mask;
      op = min->opcodes;
      opend = op + min->count;

      /* Since the opcodes tables are sorted, we could use a binary
         search here if the count were above some cutoff value.  */
      for (; op < opend; op++)
	if (op->opcode == l)
	  return op;
    }

  return NULL;
}
/* See whether an instruction uses or sets a general purpose register.  */

static boolean
sh_insn_uses_or_sets_reg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  if (sh_insn_uses_reg (insn, op, reg))
    return true;

  return sh_insn_sets_reg (insn, op, reg);
}
/* See whether an instruction uses a general purpose register.  */

static boolean
sh_insn_uses_reg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  unsigned int f;

  f = op->flags;

  if ((f & USES1) != 0
      && USES1_REG (insn) == reg)
    return true;
  if ((f & USES2) != 0
      && USES2_REG (insn) == reg)
    return true;
  if ((f & USESR0) != 0
      && reg == 0)
    return true;
  if ((f & USESAS) && reg == USESAS_REG (insn))
    return true;
  if ((f & USESR8) && reg == 8)
    return true;

  return false;
}
/* See whether an instruction sets a general purpose register.  */

static boolean
sh_insn_sets_reg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  unsigned int f;

  f = op->flags;

  if ((f & SETS1) != 0
      && SETS1_REG (insn) == reg)
    return true;
  if ((f & SETS2) != 0
      && SETS2_REG (insn) == reg)
    return true;
  if ((f & SETSR0) != 0
      && reg == 0)
    return true;
  if ((f & SETSAS) && reg == SETSAS_REG (insn))
    return true;

  return false;
}
/* See whether an instruction uses or sets a floating point register.  */

static boolean
sh_insn_uses_or_sets_freg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  if (sh_insn_uses_freg (insn, op, reg))
    return true;

  return sh_insn_sets_freg (insn, op, reg);
}
/* See whether an instruction uses a floating point register.  */

static boolean
sh_insn_uses_freg (insn, op, freg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int freg;
{
  unsigned int f;

  f = op->flags;

  /* We can't tell whether this is a double-precision insn, so play safe
     and assume that it might be.  That means we must not only test FREG
     against itself, but also test an even FREG against FREG+1 (in case
     the using insn touches just the low part of a double precision
     value) and an odd FREG against FREG-1 (in case the setting insn set
     just the low part).  What this boils down to is that we have to
     ignore the lowest bit of the register number.  */

  if ((f & USESF1) != 0
      && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
    return true;
  if ((f & USESF2) != 0
      && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
    return true;
  if ((f & USESF0) != 0
      && freg == 0)
    return true;

  return false;
}
/* See whether an instruction sets a floating point register.  */

static boolean
sh_insn_sets_freg (insn, op, freg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int freg;
{
  unsigned int f;

  f = op->flags;

  /* As above, we can't tell whether this is a double-precision insn, so
     play safe and ignore the lowest bit of the register number.  */

  if ((f & SETSF1) != 0
      && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
    return true;

  return false;
}
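
/* Note that because the low bit of the register number is ignored
   above, fr4 and fr5 are treated as the same double-precision pair:
   for example, an insn that sets fr4 is assumed to interfere with one
   that uses fr5, which is the conservative answer when single cannot
   be told from double precision.  */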
/* See whether instructions I1 and I2 conflict, assuming I1 comes
   before I2.  OP1 and OP2 are the corresponding sh_opcode structures.
   This should return true if there is a conflict, or false if the
   instructions can be swapped safely.  */

static boolean
sh_insns_conflict (i1, op1, i2, op2)
     unsigned int i1;
     const struct sh_opcode *op1;
     unsigned int i2;
     const struct sh_opcode *op2;
{
  unsigned int f1, f2;

  f1 = op1->flags;
  f2 = op2->flags;

  /* Load of fpscr conflicts with floating point operations.
     FIXME: shouldn't test raw opcodes here.  */
  if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
      || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
    return true;

  if ((f1 & (BRANCH | DELAY)) != 0
      || (f2 & (BRANCH | DELAY)) != 0)
    return true;

  if (((f1 | f2) & SETSSP)
      && (f1 & (SETSSP | USESSP))
      && (f2 & (SETSSP | USESSP)))
    return true;

  if ((f1 & SETS1) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
    return true;
  if ((f1 & SETS2) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
    return true;
  if ((f1 & SETSR0) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, 0))
    return true;
  if ((f1 & SETSAS)
      && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
    return true;
  if ((f1 & SETSF1) != 0
      && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
    return true;

  if ((f2 & SETS1) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
    return true;
  if ((f2 & SETS2) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
    return true;
  if ((f2 & SETSR0) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, 0))
    return true;
  if ((f2 & SETSAS)
      && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
    return true;
  if ((f2 & SETSF1) != 0
      && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
    return true;

  /* The instructions do not conflict.  */
  return false;
}
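
/* A concrete example (illustrative only): if I1 is 0x6132
   (mov.l @r3,r1, which has SETS1 with register 1) and I2 is 0x6213
   (mov r1,r2, which uses register 1), the SETS1 test above reports a
   conflict via sh_insn_uses_or_sets_reg, so that pair will not be
   swapped.  */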
/* I1 is a load instruction, and I2 is some other instruction.  Return
   true if I1 loads a register which I2 uses.  */

static boolean
sh_load_use (i1, op1, i2, op2)
     unsigned int i1;
     const struct sh_opcode *op1;
     unsigned int i2;
     const struct sh_opcode *op2;
{
  unsigned int f1;

  f1 = op1->flags;

  if ((f1 & LOAD) == 0)
    return false;

  /* If both SETS1 and SETSSP are set, that means a load to a special
     register using postincrement addressing mode, which we don't care
     about here.  */
  if ((f1 & SETS1) != 0
      && (f1 & SETSSP) == 0
      && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
    return true;

  if ((f1 & SETSR0) != 0
      && sh_insn_uses_reg (i2, op2, 0))
    return true;

  if ((f1 & SETSF1) != 0
      && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
    return true;

  return false;
}
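
/* For example (illustrative only), with I1 = 0x6132 (mov.l @r3,r1)
   and I2 = 0x6213 (mov r1,r2), I1 is a LOAD whose destination is r1
   and I2 uses r1, so sh_load_use returns true; the caller then avoids
   placing such a pair back to back, since that would create a
   load-use pipeline bubble.  */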
/* Try to align loads and stores within a span of memory.  This is
   called by both the ELF and the COFF sh targets.  ABFD and SEC are
   the BFD and section we are examining.  CONTENTS is the contents of
   the section.  SWAP is the routine to call to swap two instructions.
   RELOCS is a pointer to the internal relocation information, to be
   passed to SWAP.  PLABEL is a pointer to the current label in a
   sorted list of labels; LABEL_END is the end of the list.  START and
   STOP are the range of memory to examine.  If a swap is made,
   *PSWAPPED is set to true.  */
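
/* In outline: a load or store found at an address of the form 4n+2 is
   considered misaligned for the purposes of this pass, and the loop
   below tries to swap it with the preceding or following instruction,
   provided no label, delay slot, or register conflict prevents it, so
   that the load or store ends up on a four byte boundary.  See the
   longer comment above sh_relax_section for the motivation.  */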
boolean
_bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
			 plabel, label_end, start, stop, pswapped)
     bfd *abfd;
     asection *sec;
     bfd_byte *contents;
     boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
     PTR relocs;
     bfd_vma **plabel;
     bfd_vma *label_end;
     bfd_vma start;
     bfd_vma stop;
     boolean *pswapped;
{
  int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
	     || abfd->arch_info->mach == bfd_mach_sh3_dsp);
  bfd_vma i;

  /* The SH4 has a Harvard architecture, hence aligning loads is not
     desirable.  In fact, it is counter-productive, since it interferes
     with the schedules generated by the compiler.  */
  if (abfd->arch_info->mach == bfd_mach_sh4)
    return true;

  /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
     instructions.  */
  if (dsp)
    {
      sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
      sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef[0];
    }

  /* Instructions should be aligned on 2 byte boundaries.  */
  if ((start & 1) == 1)
    ++start;

  /* Now look through the unaligned addresses.  */
  i = start;
  if ((i & 2) == 0)
    i += 2;
  for (; i < stop; i += 4)
    {
      unsigned int insn;
      const struct sh_opcode *op;
      unsigned int prev_insn = 0;
      const struct sh_opcode *prev_op = NULL;

      insn = bfd_get_16 (abfd, contents + i);
      op = sh_insn_info (insn);
      if (op == NULL
	  || (op->flags & (LOAD | STORE)) == 0)
	continue;

      /* This is a load or store which is not on a four byte boundary.  */

      while (*plabel < label_end && **plabel < i)
	++*plabel;

      if (i > start)
	{
	  prev_insn = bfd_get_16 (abfd, contents + i - 2);
	  /* If INSN is the field b of a parallel processing insn, it is not
	     a load / store after all.  Note that the test here might mistake
	     the field_b of a pcopy insn for the starting code of a parallel
	     processing insn; this might miss a swapping opportunity, but at
	     least we're on the safe side.  */
	  if (dsp && (prev_insn & 0xfc00) == 0xf800)
	    continue;

	  /* Check if prev_insn is actually the field b of a parallel
	     processing insn.  Again, this can give a spurious match
	     after a pcopy.  */
	  if (dsp && i - 2 > start)
	    {
	      unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);

	      if ((pprev_insn & 0xfc00) == 0xf800)
		prev_op = NULL;
	      else
		prev_op = sh_insn_info (prev_insn);
	    }
	  else
	    prev_op = sh_insn_info (prev_insn);

	  /* If the load/store instruction is in a delay slot, we
	     can't swap.  */
	  if (prev_op == NULL
	      || (prev_op->flags & DELAY) != 0)
	    continue;
	}
      if (i > start
	  && (*plabel >= label_end || **plabel != i)
	  && prev_op != NULL
	  && (prev_op->flags & (LOAD | STORE)) == 0
	  && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
	{
	  boolean ok;

	  /* The load/store instruction does not have a label, and
	     there is a previous instruction; PREV_INSN is not
	     itself a load/store instruction, and PREV_INSN and
	     INSN do not conflict.  */

	  ok = true;

	  if (i >= start + 4)
	    {
	      unsigned int prev2_insn;
	      const struct sh_opcode *prev2_op;

	      prev2_insn = bfd_get_16 (abfd, contents + i - 4);
	      prev2_op = sh_insn_info (prev2_insn);

	      /* If the instruction before PREV_INSN has a delay
		 slot--that is, PREV_INSN is in a delay slot--we
		 cannot swap.  */
	      if (prev2_op == NULL
		  || (prev2_op->flags & DELAY) != 0)
		ok = false;

	      /* If the instruction before PREV_INSN is a load,
		 and it sets a register which INSN uses, then
		 putting INSN immediately after PREV_INSN will
		 cause a pipeline bubble, so there is no point to
		 making the swap.  */
	      if (ok
		  && (prev2_op->flags & LOAD) != 0
		  && sh_load_use (prev2_insn, prev2_op, insn, op))
		ok = false;
	    }

	  if (ok)
	    {
	      if (! (*swap) (abfd, sec, relocs, contents, i - 2))
		return false;
	      *pswapped = true;
	      continue;
	    }
	}

      while (*plabel < label_end && **plabel < i + 2)
	++*plabel;

      if (i + 2 < stop
	  && (*plabel >= label_end || **plabel != i + 2))
	{
	  unsigned int next_insn;
	  const struct sh_opcode *next_op;

	  /* There is an instruction after the load/store
	     instruction, and it does not have a label.  */
	  next_insn = bfd_get_16 (abfd, contents + i + 2);
	  next_op = sh_insn_info (next_insn);
	  if (next_op != NULL
	      && (next_op->flags & (LOAD | STORE)) == 0
	      && ! sh_insns_conflict (insn, op, next_insn, next_op))
	    {
	      boolean ok;

	      /* NEXT_INSN is not itself a load/store instruction,
		 and it does not conflict with INSN.  */

	      ok = true;

	      /* If PREV_INSN is a load, and it sets a register
		 which NEXT_INSN uses, then putting NEXT_INSN
		 immediately after PREV_INSN will cause a pipeline
		 bubble, so there is no reason to make this swap.  */
	      if (prev_op != NULL
		  && (prev_op->flags & LOAD) != 0
		  && sh_load_use (prev_insn, prev_op, next_insn, next_op))
		ok = false;

	      /* If INSN is a load, and it sets a register which
		 the insn after NEXT_INSN uses, then doing the
		 swap will cause a pipeline bubble, so there is no
		 reason to make the swap.  However, if the insn
		 after NEXT_INSN is itself a load or store
		 instruction, then it is misaligned, so
		 optimistically hope that it will be swapped
		 itself, and just live with the pipeline bubble if
		 it isn't.  */
	      if (ok
		  && i + 4 < stop
		  && (op->flags & LOAD) != 0)
		{
		  unsigned int next2_insn;
		  const struct sh_opcode *next2_op;

		  next2_insn = bfd_get_16 (abfd, contents + i + 4);
		  next2_op = sh_insn_info (next2_insn);
		  if ((next2_op->flags & (LOAD | STORE)) == 0
		      && sh_load_use (insn, op, next2_insn, next2_op))
		    ok = false;
		}

	      if (ok)
		{
		  if (! (*swap) (abfd, sec, relocs, contents, i))
		    return false;
		  *pswapped = true;
		  continue;
		}
	    }
	}
    }

  return true;
}
#endif /* not COFF_IMAGE_WITH_PE */
/* Look for loads and stores which we can align to four byte
   boundaries.  See the longer comment above sh_relax_section for why
   this is desirable.  This sets *PSWAPPED if some instruction was
   swapped.  */

static boolean
sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
     bfd *abfd;
     asection *sec;
     struct internal_reloc *internal_relocs;
     bfd_byte *contents;
     boolean *pswapped;
{
  struct internal_reloc *irel, *irelend;
  bfd_vma *labels = NULL;
  bfd_vma *label, *label_end;

  *pswapped = false;

  irelend = internal_relocs + sec->reloc_count;

  /* Get all the addresses with labels on them.  */
  labels = (bfd_vma *) bfd_malloc (sec->reloc_count * sizeof (bfd_vma));
  if (labels == NULL)
    goto error_return;
  label_end = labels;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      if (irel->r_type == R_SH_LABEL)
	{
	  *label_end = irel->r_vaddr - sec->vma;
	  ++label_end;
	}
    }

  /* Note that the assembler currently always outputs relocs in
     address order.  If that ever changes, this code will need to sort
     the label values and the relocs.  */

  label = labels;

  for (irel = internal_relocs; irel < irelend; irel++)
    {
      bfd_vma start, stop;

      if (irel->r_type != R_SH_CODE)
	continue;

      start = irel->r_vaddr - sec->vma;

      for (irel++; irel < irelend; irel++)
	if (irel->r_type == R_SH_DATA)
	  break;
      if (irel < irelend)
	stop = irel->r_vaddr - sec->vma;
      else
	stop = sec->_cooked_size;

      if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
				     (PTR) internal_relocs, &label,
				     label_end, start, stop, pswapped))
	goto error_return;
    }

  free (labels);

  return true;

 error_return:
  if (labels != NULL)
    free (labels);
  return false;
}
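
/* Note that the loop above partitions the section into spans: each
   span starts at an R_SH_CODE reloc and ends at the next R_SH_DATA
   reloc (or at the end of the section), and the addresses carrying
   R_SH_LABEL relocs are collected beforehand so that
   _bfd_sh_align_load_span never moves a labelled instruction.  */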
/* Swap two SH instructions.  */

static boolean
sh_swap_insns (abfd, sec, relocs, contents, addr)
     bfd *abfd;
     asection *sec;
     PTR relocs;
     bfd_byte *contents;
     bfd_vma addr;
{
  struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
  unsigned short i1, i2;
  struct internal_reloc *irel, *irelend;

  /* Swap the instructions themselves.  */
  i1 = bfd_get_16 (abfd, contents + addr);
  i2 = bfd_get_16 (abfd, contents + addr + 2);
  bfd_put_16 (abfd, i2, contents + addr);
  bfd_put_16 (abfd, i1, contents + addr + 2);

  /* Adjust all reloc addresses.  */
  irelend = internal_relocs + sec->reloc_count;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      int type, add;

      /* There are a few special types of relocs that we don't want to
         adjust.  These relocs do not apply to the instruction itself,
         but are only associated with the address.  */
      type = irel->r_type;
      if (type == R_SH_ALIGN
	  || type == R_SH_CODE
	  || type == R_SH_DATA
	  || type == R_SH_LABEL)
	continue;

      /* If an R_SH_USES reloc points to one of the addresses being
         swapped, we must adjust it.  It would be incorrect to do this
         for a jump, though, since we want to execute both
         instructions after the jump.  (We have avoided swapping
         around a label, so the jump will not wind up executing an
         instruction it shouldn't).  */
      if (type == R_SH_USES)
	{
	  bfd_vma off;

	  off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
	  if (off == addr)
	    irel->r_offset += 2;
	  else if (off == addr + 2)
	    irel->r_offset -= 2;
	}

      if (irel->r_vaddr - sec->vma == addr)
	{
	  irel->r_vaddr += 2;
	  add = -2;
	}
      else if (irel->r_vaddr - sec->vma == addr + 2)
	{
	  irel->r_vaddr -= 2;
	  add = 2;
	}
      else
	add = 0;

      if (add != 0)
	{
	  bfd_byte *loc;
	  unsigned short insn, oinsn;
	  boolean overflow;

	  loc = contents + irel->r_vaddr - sec->vma;
	  overflow = false;
	  switch (type)
	    {
	    default:
	      break;

	    case R_SH_PCDISP8BY2:
	    case R_SH_PCRELIMM8BY2:
	      insn = bfd_get_16 (abfd, loc);
	      oinsn = insn;
	      insn += add / 2;
	      if ((oinsn & 0xff00) != (insn & 0xff00))
		overflow = true;
	      bfd_put_16 (abfd, insn, loc);
	      break;

	    case R_SH_PCDISP:
	      insn = bfd_get_16 (abfd, loc);
	      oinsn = insn;
	      insn += add / 2;
	      if ((oinsn & 0xf000) != (insn & 0xf000))
		overflow = true;
	      bfd_put_16 (abfd, insn, loc);
	      break;

	    case R_SH_PCRELIMM8BY4:
	      /* This reloc ignores the least significant 3 bits of
                 the program counter before adding in the offset.
                 This means that if ADDR is at an even address, the
                 swap will not affect the offset.  If ADDR is at an
                 odd address, then the instruction will be crossing a
                 four byte boundary, and must be adjusted.  */
	      if ((addr & 3) != 0)
		{
		  insn = bfd_get_16 (abfd, loc);
		  oinsn = insn;
		  insn += add / 2;
		  if ((oinsn & 0xff00) != (insn & 0xff00))
		    overflow = true;
		  bfd_put_16 (abfd, insn, loc);
		}

	      break;
	    }

	  if (overflow)
	    {
	      ((*_bfd_error_handler)
	       ("%s: 0x%lx: fatal: reloc overflow while relaxing",
		bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
	      bfd_set_error (bfd_error_bad_value);
	      return false;
	    }
	}
    }

  return true;
}
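
/* To illustrate the adjustments above: after the instructions at ADDR
   and ADDR + 2 are exchanged, a reloc whose r_vaddr pointed at ADDR
   now belongs to the instruction living at ADDR + 2 and vice versa,
   which is why r_vaddr is moved by two bytes; and because a swapped
   instruction moves by two bytes while its target stays fixed, any
   pc-relative displacement on one of the swapped instructions changes
   by exactly one insn, hence the "insn += add / 2" adjustments.  */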
/* This is a modification of _bfd_coff_generic_relocate_section, which
   will handle SH relaxing.  */

static boolean
sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
		     relocs, syms, sections)
     bfd *output_bfd ATTRIBUTE_UNUSED;
     struct bfd_link_info *info;
     bfd *input_bfd;
     asection *input_section;
     bfd_byte *contents;
     struct internal_reloc *relocs;
     struct internal_syment *syms;
     asection **sections;
{
  struct internal_reloc *rel;
  struct internal_reloc *relend;

  rel = relocs;
  relend = rel + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      long symndx;
      struct coff_link_hash_entry *h;
      struct internal_syment *sym;
      bfd_vma addend;
      bfd_vma val;
      reloc_howto_type *howto;
      bfd_reloc_status_type rstat;

      /* Almost all relocs have to do with relaxing.  If any work must
	 be done for them, it has been done in sh_relax_section.  */
      if (rel->r_type != R_SH_IMM32
#ifdef COFF_WITH_PE
	  && rel->r_type != R_SH_IMM32CE
	  && rel->r_type != R_SH_IMAGEBASE
#endif
	  && rel->r_type != R_SH_PCDISP)
	continue;

      symndx = rel->r_symndx;

      if (symndx == -1)
	{
	  h = NULL;
	  sym = NULL;
	}
      else
	{
	  if (symndx < 0
	      || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
	    {
	      (*_bfd_error_handler)
		("%s: illegal symbol index %ld in relocs",
		 bfd_get_filename (input_bfd), symndx);
	      bfd_set_error (bfd_error_bad_value);
	      return false;
	    }
	  h = obj_coff_sym_hashes (input_bfd)[symndx];
	  sym = syms + symndx;
	}

      if (sym != NULL && sym->n_scnum != 0)
	addend = - sym->n_value;
      else
	addend = 0;

      if (rel->r_type == R_SH_PCDISP)
	addend -= 4;

      if (rel->r_type >= SH_COFF_HOWTO_COUNT)
	howto = NULL;
      else
	howto = &sh_coff_howtos[rel->r_type];

      if (howto == NULL)
	{
	  bfd_set_error (bfd_error_bad_value);
	  return false;
	}

#ifdef COFF_WITH_PE
      if (rel->r_type == R_SH_IMAGEBASE)
	addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
#endif

      val = 0;

      if (h == NULL)
	{
	  asection *sec;

	  /* There is nothing to do for an internal PCDISP reloc.  */
	  if (rel->r_type == R_SH_PCDISP)
	    continue;

	  if (symndx == -1)
	    {
	      sec = bfd_abs_section_ptr;
	      val = 0;
	    }
	  else
	    {
	      sec = sections[symndx];
	      val = (sec->output_section->vma
		     + sec->output_offset
		     + sym->n_value
		     - sec->vma);
	    }
	}
      else
	{
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      asection *sec;

	      sec = h->root.u.def.section;
	      val = (h->root.u.def.value
		     + sec->output_section->vma
		     + sec->output_offset);
	    }
	  else if (! info->relocateable)
	    {
	      if (! ((*info->callbacks->undefined_symbol)
		     (info, h->root.root.string, input_bfd, input_section,
		      rel->r_vaddr - input_section->vma, true)))
		return false;
	    }
	}

      rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
					contents,
					rel->r_vaddr - input_section->vma,
					val, addend);

      switch (rstat)
	{
	default:
	  abort ();
	case bfd_reloc_ok:
	  break;
	case bfd_reloc_overflow:
	  {
	    const char *name;
	    char buf[SYMNMLEN + 1];

	    if (symndx == -1)
	      name = "*ABS*";
	    else if (h != NULL)
	      name = h->root.root.string;
	    else if (sym->_n._n_n._n_zeroes == 0
		     && sym->_n._n_n._n_offset != 0)
	      name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
	    else
	      {
		strncpy (buf, sym->_n._n_name, SYMNMLEN);
		buf[SYMNMLEN] = '\0';
		name = buf;
	      }

	    if (! ((*info->callbacks->reloc_overflow)
		   (info, name, howto->name, (bfd_vma) 0, input_bfd,
		    input_section, rel->r_vaddr - input_section->vma)))
	      return false;
	  }
	}
    }

  return true;
}
/* This is a version of bfd_generic_get_relocated_section_contents
   which uses sh_relocate_section.  */

static bfd_byte *
sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
					data, relocateable, symbols)
     bfd *output_bfd;
     struct bfd_link_info *link_info;
     struct bfd_link_order *link_order;
     bfd_byte *data;
     boolean relocateable;
     asymbol **symbols;
{
  asection *input_section = link_order->u.indirect.section;
  bfd *input_bfd = input_section->owner;
  asection **sections = NULL;
  struct internal_reloc *internal_relocs = NULL;
  struct internal_syment *internal_syms = NULL;

  /* We only need to handle the case of relaxing, or of having a
     particular set of section contents, specially.  */
  if (relocateable
      || coff_section_data (input_bfd, input_section) == NULL
      || coff_section_data (input_bfd, input_section)->contents == NULL)
    return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
						       link_order, data,
						       relocateable,
						       symbols);

  memcpy (data, coff_section_data (input_bfd, input_section)->contents,
	  input_section->_raw_size);

  if ((input_section->flags & SEC_RELOC) != 0
      && input_section->reloc_count > 0)
    {
      bfd_size_type symesz = bfd_coff_symesz (input_bfd);
      bfd_byte *esym, *esymend;
      struct internal_syment *isymp;
      asection **secpp;

      if (! _bfd_coff_get_external_symbols (input_bfd))
	goto error_return;

      internal_relocs = (_bfd_coff_read_internal_relocs
			 (input_bfd, input_section, false, (bfd_byte *) NULL,
			  false, (struct internal_reloc *) NULL));
      if (internal_relocs == NULL)
	goto error_return;

      internal_syms = ((struct internal_syment *)
		       bfd_malloc (obj_raw_syment_count (input_bfd)
				   * sizeof (struct internal_syment)));
      if (internal_syms == NULL)
	goto error_return;

      sections = (asection **) bfd_malloc (obj_raw_syment_count (input_bfd)
					   * sizeof (asection *));
      if (sections == NULL)
	goto error_return;

      isymp = internal_syms;
      secpp = sections;
      esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
      esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
      while (esym < esymend)
	{
	  bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);

	  if (isymp->n_scnum != 0)
	    *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
	  else
	    {
	      if (isymp->n_value == 0)
		*secpp = bfd_und_section_ptr;
	      else
		*secpp = bfd_com_section_ptr;
	    }

	  esym += (isymp->n_numaux + 1) * symesz;
	  secpp += isymp->n_numaux + 1;
	  isymp += isymp->n_numaux + 1;
	}

      if (! sh_relocate_section (output_bfd, link_info, input_bfd,
				 input_section, data, internal_relocs,
				 internal_syms, sections))
	goto error_return;

      free (sections);
      sections = NULL;
      free (internal_syms);
      internal_syms = NULL;
      free (internal_relocs);
      internal_relocs = NULL;
    }

  return data;

 error_return:
  if (internal_relocs != NULL)
    free (internal_relocs);
  if (internal_syms != NULL)
    free (internal_syms);
  if (sections != NULL)
    free (sections);
  return NULL;
}
/* The target vectors.  */

#ifndef TARGET_SHL_SYM
CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL)
#endif

#ifdef TARGET_SHL_SYM
#define TARGET_SYM TARGET_SHL_SYM
#else
#define TARGET_SYM shlcoff_vec
#endif

#ifndef TARGET_SHL_NAME
#define TARGET_SHL_NAME "coff-shl"
#endif

#ifdef COFF_WITH_PE
CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
			       SEC_CODE | SEC_DATA, '_', NULL);
#else
CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
			       0, '_', NULL)
#endif
#ifndef TARGET_SHL_SYM
/* Some people want versions of the SH COFF target which do not align
   to 16 byte boundaries.  We implement that by adding a couple of new
   target vectors.  These are just like the ones above, but they
   change the default section alignment.  To generate them in the
   assembler, use -small.  To use them in the linker, use -b
   coff-sh{l}-small and -oformat coff-sh{l}-small.

   Yes, this is a horrible hack.  A general solution for setting
   section alignment in COFF is rather complex.  ELF handles this
   much better.  */
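
/* For example (tool names here are illustrative; use whatever prefix
   the local toolchain installs, and the options named in the comment
   above):

     as -small -o foo.o foo.s
     ld -b coff-sh-small -oformat coff-sh-small -o foo foo.o  */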
/* Only recognize the small versions if the target was not defaulted.
   Otherwise we won't recognize the non default endianness.  */

static const bfd_target *
coff_small_object_p (abfd)
     bfd *abfd;
{
  if (abfd->target_defaulted)
    {
      bfd_set_error (bfd_error_wrong_format);
      return NULL;
    }
  return coff_object_p (abfd);
}
/* Set the section alignment for the small versions.  */

static boolean
coff_small_new_section_hook (abfd, section)
     bfd *abfd;
     asection *section;
{
  if (! coff_new_section_hook (abfd, section))
    return false;

  /* We must align to at least a four byte boundary, because longword
     accesses must be on a four byte boundary.  */
  if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
    section->alignment_power = 2;

  return true;
}
/* This is copied from bfd_coff_std_swap_table so that we can change
   the default section alignment power.  */

static const bfd_coff_backend_data bfd_coff_small_swap_table =
{
  coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
  coff_swap_aux_out, coff_swap_sym_out,
  coff_swap_lineno_out, coff_swap_reloc_out,
  coff_swap_filehdr_out, coff_swap_aouthdr_out,
  coff_swap_scnhdr_out,
  FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
#ifdef COFF_LONG_FILENAMES
  true,
#else
  false,
#endif
#ifdef COFF_LONG_SECTION_NAMES
  true,
#else
  false,
#endif
  2,
#ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
  true,
#else
  false,
#endif
#ifdef COFF_DEBUG_STRING_WIDE_PREFIX
  4,
#else
  2,
#endif
  coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
  coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
  coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
  coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
  coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
  coff_classify_symbol, coff_compute_section_file_positions,
  coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
  coff_adjust_symndx, coff_link_add_one_symbol,
  coff_link_output_has_begun, coff_final_link_postscript
};
#define coff_small_close_and_cleanup \
  coff_close_and_cleanup
#define coff_small_bfd_free_cached_info \
  coff_bfd_free_cached_info
#define coff_small_get_section_contents \
  coff_get_section_contents
#define coff_small_get_section_contents_in_window \
  coff_get_section_contents_in_window
extern const bfd_target shlcoff_small_vec;

const bfd_target shcoff_small_vec =
{
  "coff-sh-small",		/* name */
  bfd_target_coff_flavour,
  BFD_ENDIAN_BIG,		/* data byte order is big */
  BFD_ENDIAN_BIG,		/* header byte order is big */

  (HAS_RELOC | EXEC_P |		/* object flags */
   HAS_LINENO | HAS_DEBUG |
   HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),

  (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
  '_',				/* leading symbol underscore */
  '/',				/* ar_pad_char */
  15,				/* ar_max_namelen */
  bfd_getb64, bfd_getb_signed_64, bfd_putb64,
  bfd_getb32, bfd_getb_signed_32, bfd_putb32,
  bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
  bfd_getb64, bfd_getb_signed_64, bfd_putb64,
  bfd_getb32, bfd_getb_signed_32, bfd_putb32,
  bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */

  {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
     bfd_generic_archive_p, _bfd_dummy_target},
  {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
     bfd_false},
  {bfd_false, coff_write_object_contents, /* bfd_write_contents */
     _bfd_write_archive_contents, bfd_false},

  BFD_JUMP_TABLE_GENERIC (coff_small),
  BFD_JUMP_TABLE_COPY (coff),
  BFD_JUMP_TABLE_CORE (_bfd_nocore),
  BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
  BFD_JUMP_TABLE_SYMBOLS (coff),
  BFD_JUMP_TABLE_RELOCS (coff),
  BFD_JUMP_TABLE_WRITE (coff),
  BFD_JUMP_TABLE_LINK (coff),
  BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),

  & shlcoff_small_vec,

  (PTR) &bfd_coff_small_swap_table
};
const bfd_target shlcoff_small_vec =
{
  "coff-shl-small",		/* name */
  bfd_target_coff_flavour,
  BFD_ENDIAN_LITTLE,		/* data byte order is little */
  BFD_ENDIAN_LITTLE,		/* header byte order is little endian too */

  (HAS_RELOC | EXEC_P |		/* object flags */
   HAS_LINENO | HAS_DEBUG |
   HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),

  (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
  '_',				/* leading symbol underscore */
  '/',				/* ar_pad_char */
  15,				/* ar_max_namelen */
  bfd_getl64, bfd_getl_signed_64, bfd_putl64,
  bfd_getl32, bfd_getl_signed_32, bfd_putl32,
  bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
  bfd_getl64, bfd_getl_signed_64, bfd_putl64,
  bfd_getl32, bfd_getl_signed_32, bfd_putl32,
  bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */

  {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
     bfd_generic_archive_p, _bfd_dummy_target},
  {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
     bfd_false},
  {bfd_false, coff_write_object_contents, /* bfd_write_contents */
     _bfd_write_archive_contents, bfd_false},

  BFD_JUMP_TABLE_GENERIC (coff_small),
  BFD_JUMP_TABLE_COPY (coff),
  BFD_JUMP_TABLE_CORE (_bfd_nocore),
  BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
  BFD_JUMP_TABLE_SYMBOLS (coff),
  BFD_JUMP_TABLE_RELOCS (coff),
  BFD_JUMP_TABLE_WRITE (coff),
  BFD_JUMP_TABLE_LINK (coff),
  BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),

  NULL,

  (PTR) &bfd_coff_small_swap_table
};
#endif