1 /* BFD back-end for Renesas Super-H COFF binaries.
2 Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011
4 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
6 Written by Steve Chamberlain, <sac@cygnus.com>.
7 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
9 This file is part of BFD, the Binary File Descriptor library.
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or
14 (at your option) any later version.
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
24 MA 02110-1301, USA. */
28 #include "libiberty.h"
32 #include "coff/internal.h"
34 #undef bfd_pe_print_pdata
39 #ifndef COFF_IMAGE_WITH_PE
40 static bfd_boolean sh_align_load_span
41 PARAMS ((bfd *, asection *, bfd_byte *,
42 bfd_boolean (*) (bfd *, asection *, PTR, bfd_byte *, bfd_vma),
43 PTR, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *));
45 #define _bfd_sh_align_load_span sh_align_load_span
48 #define bfd_pe_print_pdata _bfd_pe_print_ce_compressed_pdata
52 #define bfd_pe_print_pdata NULL
54 #endif /* COFF_WITH_PE. */
58 /* Internal functions. */
59 static bfd_reloc_status_type sh_reloc
60 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
61 static long get_symbol_value PARAMS ((asymbol *));
62 static bfd_boolean sh_relax_section
63 PARAMS ((bfd *, asection *, struct bfd_link_info *, bfd_boolean *));
64 static bfd_boolean sh_relax_delete_bytes
65 PARAMS ((bfd *, asection *, bfd_vma, int));
66 #ifndef COFF_IMAGE_WITH_PE
67 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
69 static bfd_boolean sh_align_loads
  PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *,
	   bfd_boolean *));
72 static bfd_boolean sh_swap_insns
73 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
74 static bfd_boolean sh_relocate_section
75 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
76 struct internal_reloc *, struct internal_syment *, asection **));
77 static bfd_byte *sh_coff_get_relocated_section_contents
78 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
79 bfd_byte *, bfd_boolean, asymbol **));
80 static reloc_howto_type * sh_coff_reloc_type_lookup PARAMS ((bfd *, bfd_reloc_code_real_type));
#ifdef COFF_WITH_PE
/* Can't build import tables with 2**4 alignment.  */
#define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	2
#else
/* Default section alignment to 2**4.  */
#define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	4
#endif /* COFF_WITH_PE */
90 #ifdef COFF_IMAGE_WITH_PE
91 /* Align PE executables. */
92 #define COFF_PAGE_SIZE 0x1000
95 /* Generate long file names. */
96 #define COFF_LONG_FILENAMES
99 static bfd_boolean in_reloc_p PARAMS ((bfd *, reloc_howto_type *));
100 /* Return TRUE if this relocation should
101 appear in the output .reloc section. */
102 static bfd_boolean in_reloc_p (abfd, howto)
103 bfd * abfd ATTRIBUTE_UNUSED;
104 reloc_howto_type * howto;
106 return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
110 /* The supported relocations. There are a lot of relocations defined
111 in coff/internal.h which we do not expect to ever see. */
112 static reloc_howto_type sh_coff_howtos[] =
118 HOWTO (R_SH_IMM32CE, /* type */
120 2, /* size (0 = byte, 1 = short, 2 = long) */
122 FALSE, /* pc_relative */
124 complain_overflow_bitfield, /* complain_on_overflow */
125 sh_reloc, /* special_function */
126 "r_imm32ce", /* name */
127 TRUE, /* partial_inplace */
128 0xffffffff, /* src_mask */
129 0xffffffff, /* dst_mask */
130 FALSE), /* pcrel_offset */
134 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
135 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
136 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
137 EMPTY_HOWTO (6), /* R_SH_IMM24 */
138 EMPTY_HOWTO (7), /* R_SH_LOW16 */
140 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
142 HOWTO (R_SH_PCDISP8BY2, /* type */
144 1, /* size (0 = byte, 1 = short, 2 = long) */
146 TRUE, /* pc_relative */
148 complain_overflow_signed, /* complain_on_overflow */
149 sh_reloc, /* special_function */
150 "r_pcdisp8by2", /* name */
151 TRUE, /* partial_inplace */
154 TRUE), /* pcrel_offset */
156 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
158 HOWTO (R_SH_PCDISP, /* type */
160 1, /* size (0 = byte, 1 = short, 2 = long) */
162 TRUE, /* pc_relative */
164 complain_overflow_signed, /* complain_on_overflow */
165 sh_reloc, /* special_function */
166 "r_pcdisp12by2", /* name */
167 TRUE, /* partial_inplace */
168 0xfff, /* src_mask */
169 0xfff, /* dst_mask */
170 TRUE), /* pcrel_offset */
174 HOWTO (R_SH_IMM32, /* type */
176 2, /* size (0 = byte, 1 = short, 2 = long) */
178 FALSE, /* pc_relative */
180 complain_overflow_bitfield, /* complain_on_overflow */
181 sh_reloc, /* special_function */
182 "r_imm32", /* name */
183 TRUE, /* partial_inplace */
184 0xffffffff, /* src_mask */
185 0xffffffff, /* dst_mask */
186 FALSE), /* pcrel_offset */
190 HOWTO (R_SH_IMAGEBASE, /* type */
192 2, /* size (0 = byte, 1 = short, 2 = long) */
194 FALSE, /* pc_relative */
196 complain_overflow_bitfield, /* complain_on_overflow */
197 sh_reloc, /* special_function */
199 TRUE, /* partial_inplace */
200 0xffffffff, /* src_mask */
201 0xffffffff, /* dst_mask */
202 FALSE), /* pcrel_offset */
204 EMPTY_HOWTO (16), /* R_SH_IMM8 */
206 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
207 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
208 EMPTY_HOWTO (19), /* R_SH_IMM4 */
209 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
210 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
212 HOWTO (R_SH_PCRELIMM8BY2, /* type */
214 1, /* size (0 = byte, 1 = short, 2 = long) */
216 TRUE, /* pc_relative */
218 complain_overflow_unsigned, /* complain_on_overflow */
219 sh_reloc, /* special_function */
220 "r_pcrelimm8by2", /* name */
221 TRUE, /* partial_inplace */
224 TRUE), /* pcrel_offset */
226 HOWTO (R_SH_PCRELIMM8BY4, /* type */
228 1, /* size (0 = byte, 1 = short, 2 = long) */
230 TRUE, /* pc_relative */
232 complain_overflow_unsigned, /* complain_on_overflow */
233 sh_reloc, /* special_function */
234 "r_pcrelimm8by4", /* name */
235 TRUE, /* partial_inplace */
238 TRUE), /* pcrel_offset */
240 HOWTO (R_SH_IMM16, /* type */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
244 FALSE, /* pc_relative */
246 complain_overflow_bitfield, /* complain_on_overflow */
247 sh_reloc, /* special_function */
248 "r_imm16", /* name */
249 TRUE, /* partial_inplace */
250 0xffff, /* src_mask */
251 0xffff, /* dst_mask */
252 FALSE), /* pcrel_offset */
254 HOWTO (R_SH_SWITCH16, /* type */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
258 FALSE, /* pc_relative */
260 complain_overflow_bitfield, /* complain_on_overflow */
261 sh_reloc, /* special_function */
262 "r_switch16", /* name */
263 TRUE, /* partial_inplace */
264 0xffff, /* src_mask */
265 0xffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
268 HOWTO (R_SH_SWITCH32, /* type */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
272 FALSE, /* pc_relative */
274 complain_overflow_bitfield, /* complain_on_overflow */
275 sh_reloc, /* special_function */
276 "r_switch32", /* name */
277 TRUE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
282 HOWTO (R_SH_USES, /* type */
284 1, /* size (0 = byte, 1 = short, 2 = long) */
286 FALSE, /* pc_relative */
288 complain_overflow_bitfield, /* complain_on_overflow */
289 sh_reloc, /* special_function */
291 TRUE, /* partial_inplace */
292 0xffff, /* src_mask */
293 0xffff, /* dst_mask */
294 FALSE), /* pcrel_offset */
296 HOWTO (R_SH_COUNT, /* type */
298 2, /* size (0 = byte, 1 = short, 2 = long) */
300 FALSE, /* pc_relative */
302 complain_overflow_bitfield, /* complain_on_overflow */
303 sh_reloc, /* special_function */
304 "r_count", /* name */
305 TRUE, /* partial_inplace */
306 0xffffffff, /* src_mask */
307 0xffffffff, /* dst_mask */
308 FALSE), /* pcrel_offset */
310 HOWTO (R_SH_ALIGN, /* type */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
314 FALSE, /* pc_relative */
316 complain_overflow_bitfield, /* complain_on_overflow */
317 sh_reloc, /* special_function */
318 "r_align", /* name */
319 TRUE, /* partial_inplace */
320 0xffffffff, /* src_mask */
321 0xffffffff, /* dst_mask */
322 FALSE), /* pcrel_offset */
324 HOWTO (R_SH_CODE, /* type */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
328 FALSE, /* pc_relative */
330 complain_overflow_bitfield, /* complain_on_overflow */
331 sh_reloc, /* special_function */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
338 HOWTO (R_SH_DATA, /* type */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
342 FALSE, /* pc_relative */
344 complain_overflow_bitfield, /* complain_on_overflow */
345 sh_reloc, /* special_function */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
352 HOWTO (R_SH_LABEL, /* type */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
356 FALSE, /* pc_relative */
358 complain_overflow_bitfield, /* complain_on_overflow */
359 sh_reloc, /* special_function */
360 "r_label", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
366 HOWTO (R_SH_SWITCH8, /* type */
368 0, /* size (0 = byte, 1 = short, 2 = long) */
370 FALSE, /* pc_relative */
372 complain_overflow_bitfield, /* complain_on_overflow */
373 sh_reloc, /* special_function */
374 "r_switch8", /* name */
375 TRUE, /* partial_inplace */
378 FALSE) /* pcrel_offset */
381 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
383 /* Check for a bad magic number. */
384 #define BADMAG(x) SHBADMAG(x)
386 /* Customize coffcode.h (this is not currently used). */
389 /* FIXME: This should not be set here. */
390 #define __A_MAGIC_SET__
393 /* Swap the r_offset field in and out. */
394 #define SWAP_IN_RELOC_OFFSET H_GET_32
395 #define SWAP_OUT_RELOC_OFFSET H_PUT_32
397 /* Swap out extra information in the reloc structure. */
398 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
401 dst->r_stuff[0] = 'S'; \
402 dst->r_stuff[1] = 'C'; \
407 /* Get the value of a symbol, when performing a relocation. */
410 get_symbol_value (symbol)
415 if (bfd_is_com_section (symbol->section))
418 relocation = (symbol->value +
419 symbol->section->output_section->vma +
420 symbol->section->output_offset);
426 /* Convert an rtype to howto for the COFF backend linker.
427 Copied from coff-i386. */
428 #define coff_rtype_to_howto coff_sh_rtype_to_howto
429 static reloc_howto_type * coff_sh_rtype_to_howto PARAMS ((bfd *, asection *, struct internal_reloc *, struct coff_link_hash_entry *, struct internal_syment *, bfd_vma *));
431 static reloc_howto_type *
432 coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
433 bfd * abfd ATTRIBUTE_UNUSED;
435 struct internal_reloc * rel;
436 struct coff_link_hash_entry * h;
437 struct internal_syment * sym;
440 reloc_howto_type * howto;
442 howto = sh_coff_howtos + rel->r_type;
446 if (howto->pc_relative)
447 *addendp += sec->vma;
449 if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
451 /* This is a common symbol. The section contents include the
452 size (sym->n_value) as an addend. The relocate_section
453 function will be adding in the final value of the symbol. We
454 need to subtract out the current size in order to get the
456 BFD_ASSERT (h != NULL);
459 if (howto->pc_relative)
463 /* If the symbol is defined, then the generic code is going to
464 add back the symbol value in order to cancel out an
465 adjustment it made to the addend. However, we set the addend
466 to 0 at the start of this function. We need to adjust here,
467 to avoid the adjustment the generic code will make. FIXME:
468 This is getting a bit hackish. */
469 if (sym != NULL && sym->n_scnum != 0)
470 *addendp -= sym->n_value;
473 if (rel->r_type == R_SH_IMAGEBASE)
474 *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
479 #endif /* COFF_WITH_PE */
481 /* This structure is used to map BFD reloc codes to SH PE relocs. */
482 struct shcoff_reloc_map
484 bfd_reloc_code_real_type bfd_reloc_val;
485 unsigned char shcoff_reloc_val;
489 /* An array mapping BFD reloc codes to SH PE relocs. */
490 static const struct shcoff_reloc_map sh_reloc_map[] =
492 { BFD_RELOC_32, R_SH_IMM32CE },
493 { BFD_RELOC_RVA, R_SH_IMAGEBASE },
494 { BFD_RELOC_CTOR, R_SH_IMM32CE },
497 /* An array mapping BFD reloc codes to SH PE relocs. */
498 static const struct shcoff_reloc_map sh_reloc_map[] =
500 { BFD_RELOC_32, R_SH_IMM32 },
501 { BFD_RELOC_CTOR, R_SH_IMM32 },
505 /* Given a BFD reloc code, return the howto structure for the
506 corresponding SH PE reloc. */
507 #define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
508 #define coff_bfd_reloc_name_lookup sh_coff_reloc_name_lookup
510 static reloc_howto_type *
511 sh_coff_reloc_type_lookup (abfd, code)
512 bfd * abfd ATTRIBUTE_UNUSED;
513 bfd_reloc_code_real_type code;
517 for (i = ARRAY_SIZE (sh_reloc_map); i--;)
518 if (sh_reloc_map[i].bfd_reloc_val == code)
519 return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
521 (*_bfd_error_handler) (_("SH Error: unknown reloc type %d"), code);
525 static reloc_howto_type *
526 sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
531 for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
532 if (sh_coff_howtos[i].name != NULL
533 && strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
534 return &sh_coff_howtos[i];
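/* For illustration only -- a hypothetical usage sketch, not part of
   this file.  Assuming ABFD is an already opened SH COFF bfd, a client
   reaches the lookup routines above through the generic BFD entry
   points roughly like this:
       reloc_howto_type *howto;
       howto = bfd_reloc_type_lookup (abfd, BFD_RELOC_32);
	 -- goes through sh_reloc_map and yields the R_SH_IMM32 howto
	    (R_SH_IMM32CE when built with COFF_WITH_PE).
       howto = bfd_reloc_name_lookup (abfd, "r_imm32");
	 -- searches sh_coff_howtos by name, case-insensitively.  */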
539 /* This macro is used in coffcode.h to get the howto corresponding to
540 an internal reloc. */
542 #define RTYPE2HOWTO(relent, internal) \
544 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
545 ? &sh_coff_howtos[(internal)->r_type] \
546 : (reloc_howto_type *) NULL))
548 /* This is the same as the macro in coffcode.h, except that it copies
549 r_offset into reloc_entry->addend for some relocs. */
550 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
552 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
553 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
554 coffsym = (obj_symbols (abfd) \
555 + (cache_ptr->sym_ptr_ptr - symbols)); \
557 coffsym = coff_symbol_from (abfd, ptr); \
558 if (coffsym != (coff_symbol_type *) NULL \
559 && coffsym->native->u.syment.n_scnum == 0) \
560 cache_ptr->addend = 0; \
561 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
562 && ptr->section != (asection *) NULL) \
563 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
565 cache_ptr->addend = 0; \
566 if ((reloc).r_type == R_SH_SWITCH8 \
567 || (reloc).r_type == R_SH_SWITCH16 \
568 || (reloc).r_type == R_SH_SWITCH32 \
569 || (reloc).r_type == R_SH_USES \
570 || (reloc).r_type == R_SH_COUNT \
571 || (reloc).r_type == R_SH_ALIGN) \
572 cache_ptr->addend = (reloc).r_offset; \
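/* A worked instance (an assumed example, not part of this file): for
   an R_SH_USES reloc whose r_offset field is 0x10, the final clause of
   CALC_ADDEND above overrides whatever the generic rules computed and
   sets cache_ptr->addend to 0x10, so the swapped-in reloc carries the
   USES displacement as its addend.  */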
575 /* This is the howto function for the SH relocations. */
577 static bfd_reloc_status_type
578 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
581 arelent *reloc_entry;
584 asection *input_section;
586 char **error_message ATTRIBUTE_UNUSED;
590 unsigned short r_type;
591 bfd_vma addr = reloc_entry->address;
592 bfd_byte *hit_data = addr + (bfd_byte *) data;
594 r_type = reloc_entry->howto->type;
596 if (output_bfd != NULL)
598 /* Partial linking--do nothing. */
599 reloc_entry->address += input_section->output_offset;
603 /* Almost all relocs have to do with relaxing. If any work must be
604 done for them, it has been done in sh_relax_section. */
605 if (r_type != R_SH_IMM32
607 && r_type != R_SH_IMM32CE
608 && r_type != R_SH_IMAGEBASE
610 && (r_type != R_SH_PCDISP
611 || (symbol_in->flags & BSF_LOCAL) != 0))
614 if (symbol_in != NULL
615 && bfd_is_und_section (symbol_in->section))
616 return bfd_reloc_undefined;
618 sym_value = get_symbol_value (symbol_in);
626 insn = bfd_get_32 (abfd, hit_data);
627 insn += sym_value + reloc_entry->addend;
628 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
632 insn = bfd_get_32 (abfd, hit_data);
633 insn += sym_value + reloc_entry->addend;
634 insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
635 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
639 insn = bfd_get_16 (abfd, hit_data);
640 sym_value += reloc_entry->addend;
641 sym_value -= (input_section->output_section->vma
642 + input_section->output_offset
645 sym_value += (insn & 0xfff) << 1;
648 insn = (insn & 0xf000) | (sym_value & 0xfff);
649 bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
650 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
651 return bfd_reloc_overflow;
661 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
663 /* We can do relaxing. */
664 #define coff_bfd_relax_section sh_relax_section
666 /* We use the special COFF backend linker. */
667 #define coff_relocate_section sh_relocate_section
/* When relaxing, we need to use special code to get the relocated
   section contents.  */
671 #define coff_bfd_get_relocated_section_contents \
672 sh_coff_get_relocated_section_contents
674 #include "coffcode.h"
676 /* This function handles relaxing on the SH.
   Function calls on the SH look like this:
       mov.l  L1,r0
       ...
       jsr    @r0
       ...
     L1:
       .long  function
687 The compiler and assembler will cooperate to create R_SH_USES
688 relocs on the jsr instructions. The r_offset field of the
689 R_SH_USES reloc is the PC relative offset to the instruction which
690 loads the register (the r_offset field is computed as though it
691 were a jump instruction, so the offset value is actually from four
692 bytes past the instruction). The linker can use this reloc to
693 determine just which function is being called, and thus decide
694 whether it is possible to replace the jsr with a bsr.
696 If multiple function calls are all based on a single register load
697 (i.e., the same function is called multiple times), the compiler
698 guarantees that each function call will have an R_SH_USES reloc.
699 Therefore, if the linker is able to convert each R_SH_USES reloc
   which refers to that address, it can safely eliminate the register
   load.
703 When the assembler creates an R_SH_USES reloc, it examines it to
704 determine which address is being loaded (L1 in the above example).
705 It then counts the number of references to that address, and
706 creates an R_SH_COUNT reloc at that address. The r_offset field of
707 the R_SH_COUNT reloc will be the number of references. If the
708 linker is able to eliminate a register load, it can use the
   R_SH_COUNT reloc to see whether it can also eliminate the function
   address.
712 SH relaxing also handles another, unrelated, matter. On the SH, if
713 a load or store instruction is not aligned on a four byte boundary,
714 the memory cycle interferes with the 32 bit instruction fetch,
715 causing a one cycle bubble in the pipeline. Therefore, we try to
716 align load and store instructions on four byte boundaries if we
717 can, by swapping them with one of the adjacent instructions. */
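/* For illustration only -- a sketch of the transformation, not part of
   this file.  When every R_SH_USES reloc referring to L1 can be
   converted and the R_SH_COUNT at L1 drops to zero, the call sequence
   shown above relaxes from
       mov.l  L1,r0
       ...
       jsr    @r0
       ...
     L1:
       .long  function
   to a direct
       bsr    function
   with the register load and the stored address deleted by
   sh_relax_delete_bytes.  */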
720 sh_relax_section (abfd, sec, link_info, again)
723 struct bfd_link_info *link_info;
726 struct internal_reloc *internal_relocs;
727 bfd_boolean have_code;
728 struct internal_reloc *irel, *irelend;
729 bfd_byte *contents = NULL;
733 if (link_info->relocatable
734 || (sec->flags & SEC_RELOC) == 0
735 || sec->reloc_count == 0)
738 if (coff_section_data (abfd, sec) == NULL)
740 bfd_size_type amt = sizeof (struct coff_section_tdata);
741 sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
742 if (sec->used_by_bfd == NULL)
746 internal_relocs = (_bfd_coff_read_internal_relocs
747 (abfd, sec, link_info->keep_memory,
748 (bfd_byte *) NULL, FALSE,
749 (struct internal_reloc *) NULL));
750 if (internal_relocs == NULL)
755 irelend = internal_relocs + sec->reloc_count;
756 for (irel = internal_relocs; irel < irelend; irel++)
758 bfd_vma laddr, paddr, symval;
760 struct internal_reloc *irelfn, *irelscan, *irelcount;
761 struct internal_syment sym;
764 if (irel->r_type == R_SH_CODE)
767 if (irel->r_type != R_SH_USES)
770 /* Get the section contents. */
771 if (contents == NULL)
773 if (coff_section_data (abfd, sec)->contents != NULL)
774 contents = coff_section_data (abfd, sec)->contents;
777 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
782 /* The r_offset field of the R_SH_USES reloc will point us to
783 the register load. The 4 is because the r_offset field is
	 computed as though it were a jump offset, and jump offsets are
	 measured from 4 bytes after the jump instruction.  */
786 laddr = irel->r_vaddr - sec->vma + 4;
787 /* Careful to sign extend the 32-bit offset. */
788 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
789 if (laddr >= sec->size)
791 (*_bfd_error_handler) ("%B: 0x%lx: warning: bad R_SH_USES offset",
792 abfd, (unsigned long) irel->r_vaddr);
795 insn = bfd_get_16 (abfd, contents + laddr);
797 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
798 if ((insn & 0xf000) != 0xd000)
800 ((*_bfd_error_handler)
801 ("%B: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
802 abfd, (unsigned long) irel->r_vaddr, insn));
806 /* Get the address from which the register is being loaded. The
807 displacement in the mov.l instruction is quadrupled. It is a
808 displacement from four bytes after the movl instruction, but,
809 before adding in the PC address, two least significant bits
810 of the PC are cleared. We assume that the section is aligned
811 on a four byte boundary. */
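      /* For illustration (an assumed example, not part of the original
	 comment): for the insn 0xd012 at section offset 0x100, the
	 displacement is 0x12 * 4 = 0x48 and the base is
	 (0x100 + 4) & ~3 = 0x104, so the constant is loaded from
	 section offset 0x14c.  */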
814 paddr += (laddr + 4) &~ (bfd_vma) 3;
815 if (paddr >= sec->size)
817 ((*_bfd_error_handler)
818 ("%B: 0x%lx: warning: bad R_SH_USES load offset",
819 abfd, (unsigned long) irel->r_vaddr));
823 /* Get the reloc for the address from which the register is
824 being loaded. This reloc will tell us which function is
825 actually being called. */
827 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
828 if (irelfn->r_vaddr == paddr
830 && (irelfn->r_type == R_SH_IMM32
831 || irelfn->r_type == R_SH_IMM32CE
832 || irelfn->r_type == R_SH_IMAGEBASE)
835 && irelfn->r_type == R_SH_IMM32
839 if (irelfn >= irelend)
841 ((*_bfd_error_handler)
842 ("%B: 0x%lx: warning: could not find expected reloc",
843 abfd, (unsigned long) paddr));
847 /* Get the value of the symbol referred to by the reloc. */
848 if (! _bfd_coff_get_external_symbols (abfd))
850 bfd_coff_swap_sym_in (abfd,
851 ((bfd_byte *) obj_coff_external_syms (abfd)
853 * bfd_coff_symesz (abfd))),
855 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
857 ((*_bfd_error_handler)
858 ("%B: 0x%lx: warning: symbol in unexpected section",
859 abfd, (unsigned long) paddr));
863 if (sym.n_sclass != C_EXT)
865 symval = (sym.n_value
867 + sec->output_section->vma
868 + sec->output_offset);
872 struct coff_link_hash_entry *h;
874 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
875 BFD_ASSERT (h != NULL);
876 if (h->root.type != bfd_link_hash_defined
877 && h->root.type != bfd_link_hash_defweak)
879 /* This appears to be a reference to an undefined
880 symbol. Just ignore it--it will be caught by the
881 regular reloc processing. */
885 symval = (h->root.u.def.value
886 + h->root.u.def.section->output_section->vma
887 + h->root.u.def.section->output_offset);
890 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
892 /* See if this function call can be shortened. */
896 + sec->output_section->vma
899 if (foff < -0x1000 || foff >= 0x1000)
901 /* After all that work, we can't shorten this function call. */
905 /* Shorten the function call. */
907 /* For simplicity of coding, we are going to modify the section
908 contents, the section relocs, and the BFD symbol table. We
909 must tell the rest of the code not to free up this
910 information. It would be possible to instead create a table
911 of changes which have to be made, as is done in coff-mips.c;
912 that would be more work, but would require less memory when
913 the linker is run. */
915 coff_section_data (abfd, sec)->relocs = internal_relocs;
916 coff_section_data (abfd, sec)->keep_relocs = TRUE;
918 coff_section_data (abfd, sec)->contents = contents;
919 coff_section_data (abfd, sec)->keep_contents = TRUE;
921 obj_coff_keep_syms (abfd) = TRUE;
923 /* Replace the jsr with a bsr. */
925 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
926 replace the jsr with a bsr. */
927 irel->r_type = R_SH_PCDISP;
928 irel->r_symndx = irelfn->r_symndx;
929 if (sym.n_sclass != C_EXT)
931 /* If this needs to be changed because of future relaxing,
932 it will be handled here like other internal PCDISP
935 (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
936 contents + irel->r_vaddr - sec->vma);
940 /* We can't fully resolve this yet, because the external
941 symbol value may be changed by future relaxing. We let
942 the final link phase handle it. */
943 bfd_put_16 (abfd, (bfd_vma) 0xb000,
944 contents + irel->r_vaddr - sec->vma);
      /* See if there is another R_SH_USES reloc referring to the same
	 register load.  */
949 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
950 if (irelscan->r_type == R_SH_USES
951 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
953 if (irelscan < irelend)
955 /* Some other function call depends upon this register load,
956 and we have not yet converted that function call.
957 Indeed, we may never be able to convert it. There is
958 nothing else we can do at this point. */
962 /* Look for a R_SH_COUNT reloc on the location where the
963 function address is stored. Do this before deleting any
964 bytes, to avoid confusion about the address. */
965 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
966 if (irelcount->r_vaddr == paddr
967 && irelcount->r_type == R_SH_COUNT)
970 /* Delete the register load. */
971 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
974 /* That will change things, so, just in case it permits some
975 other function call to come within range, we should relax
976 again. Note that this is not required, and it may be slow. */
979 /* Now check whether we got a COUNT reloc. */
980 if (irelcount >= irelend)
982 ((*_bfd_error_handler)
983 ("%B: 0x%lx: warning: could not find expected COUNT reloc",
984 abfd, (unsigned long) paddr));
      /* The number of uses is stored in the r_offset field.  We've
	 just deleted one.  */
990 if (irelcount->r_offset == 0)
992 ((*_bfd_error_handler) ("%B: 0x%lx: warning: bad count",
993 abfd, (unsigned long) paddr));
997 --irelcount->r_offset;
999 /* If there are no more uses, we can delete the address. Reload
1000 the address from irelfn, in case it was changed by the
1001 previous call to sh_relax_delete_bytes. */
1002 if (irelcount->r_offset == 0)
1004 if (! sh_relax_delete_bytes (abfd, sec,
1005 irelfn->r_vaddr - sec->vma, 4))
1009 /* We've done all we can with that function call. */
  /* Look for load and store instructions that we can align on four
     byte boundaries.  */
1016 bfd_boolean swapped;
1018 /* Get the section contents. */
1019 if (contents == NULL)
1021 if (coff_section_data (abfd, sec)->contents != NULL)
1022 contents = coff_section_data (abfd, sec)->contents;
1025 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1030 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1035 coff_section_data (abfd, sec)->relocs = internal_relocs;
1036 coff_section_data (abfd, sec)->keep_relocs = TRUE;
1038 coff_section_data (abfd, sec)->contents = contents;
1039 coff_section_data (abfd, sec)->keep_contents = TRUE;
1041 obj_coff_keep_syms (abfd) = TRUE;
1045 if (internal_relocs != NULL
1046 && internal_relocs != coff_section_data (abfd, sec)->relocs)
1048 if (! link_info->keep_memory)
1049 free (internal_relocs);
1051 coff_section_data (abfd, sec)->relocs = internal_relocs;
1054 if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1056 if (! link_info->keep_memory)
1059 /* Cache the section contents for coff_link_input_bfd. */
1060 coff_section_data (abfd, sec)->contents = contents;
1066 if (internal_relocs != NULL
1067 && internal_relocs != coff_section_data (abfd, sec)->relocs)
1068 free (internal_relocs);
1069 if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1074 /* Delete some bytes from a section while relaxing. */
1077 sh_relax_delete_bytes (abfd, sec, addr, count)
1084 struct internal_reloc *irel, *irelend;
1085 struct internal_reloc *irelalign;
1087 bfd_byte *esym, *esymend;
1088 bfd_size_type symesz;
1089 struct coff_link_hash_entry **sym_hash;
1092 contents = coff_section_data (abfd, sec)->contents;
  /* The deletion must stop at the next ALIGN reloc for an alignment
1095 power larger than the number of bytes we are deleting. */
1100 irel = coff_section_data (abfd, sec)->relocs;
1101 irelend = irel + sec->reloc_count;
1102 for (; irel < irelend; irel++)
1104 if (irel->r_type == R_SH_ALIGN
1105 && irel->r_vaddr - sec->vma > addr
1106 && count < (1 << irel->r_offset))
1109 toaddr = irel->r_vaddr - sec->vma;
1114 /* Actually delete the bytes. */
1115 memmove (contents + addr, contents + addr + count,
1116 (size_t) (toaddr - addr - count));
1117 if (irelalign == NULL)
1123 #define NOP_OPCODE (0x0009)
1125 BFD_ASSERT ((count & 1) == 0);
1126 for (i = 0; i < count; i += 2)
1127 bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1130 /* Adjust all the relocs. */
1131 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1133 bfd_vma nraddr, stop;
1136 struct internal_syment sym;
1137 int off, adjust, oinsn;
1138 bfd_signed_vma voff = 0;
1139 bfd_boolean overflow;
1141 /* Get the new reloc address. */
1142 nraddr = irel->r_vaddr - sec->vma;
1143 if ((irel->r_vaddr - sec->vma > addr
1144 && irel->r_vaddr - sec->vma < toaddr)
1145 || (irel->r_type == R_SH_ALIGN
1146 && irel->r_vaddr - sec->vma == toaddr))
1149 /* See if this reloc was for the bytes we have deleted, in which
1150 case we no longer care about it. Don't delete relocs which
1151 represent addresses, though. */
1152 if (irel->r_vaddr - sec->vma >= addr
1153 && irel->r_vaddr - sec->vma < addr + count
1154 && irel->r_type != R_SH_ALIGN
1155 && irel->r_type != R_SH_CODE
1156 && irel->r_type != R_SH_DATA
1157 && irel->r_type != R_SH_LABEL)
1158 irel->r_type = R_SH_UNUSED;
1160 /* If this is a PC relative reloc, see if the range it covers
1161 includes the bytes we have deleted. */
1162 switch (irel->r_type)
1167 case R_SH_PCDISP8BY2:
1169 case R_SH_PCRELIMM8BY2:
1170 case R_SH_PCRELIMM8BY4:
1171 start = irel->r_vaddr - sec->vma;
1172 insn = bfd_get_16 (abfd, contents + nraddr);
1176 switch (irel->r_type)
1179 start = stop = addr;
1185 case R_SH_IMAGEBASE:
1187 /* If this reloc is against a symbol defined in this
1188 section, and the symbol will not be adjusted below, we
	     must check the addend to see whether it will put the value in
1190 range to be adjusted, and hence must be changed. */
1191 bfd_coff_swap_sym_in (abfd,
1192 ((bfd_byte *) obj_coff_external_syms (abfd)
1194 * bfd_coff_symesz (abfd))),
1196 if (sym.n_sclass != C_EXT
1197 && sym.n_scnum == sec->target_index
1198 && ((bfd_vma) sym.n_value <= addr
1199 || (bfd_vma) sym.n_value >= toaddr))
1203 val = bfd_get_32 (abfd, contents + nraddr);
1205 if (val > addr && val < toaddr)
1206 bfd_put_32 (abfd, val - count, contents + nraddr);
1208 start = stop = addr;
1211 case R_SH_PCDISP8BY2:
1215 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1219 bfd_coff_swap_sym_in (abfd,
1220 ((bfd_byte *) obj_coff_external_syms (abfd)
1222 * bfd_coff_symesz (abfd))),
1224 if (sym.n_sclass == C_EXT)
1225 start = stop = addr;
1231 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1235 case R_SH_PCRELIMM8BY2:
1237 stop = start + 4 + off * 2;
1240 case R_SH_PCRELIMM8BY4:
1242 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
	  /* These reloc types represent
	       .word L2-L1
1250 The r_offset field holds the difference between the reloc
1251 address and L1. That is the start of the reloc, and
1252 adding in the contents gives us the top. We must adjust
1253 both the r_offset field and the section contents. */
1255 start = irel->r_vaddr - sec->vma;
1256 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1260 && (stop <= addr || stop >= toaddr))
1261 irel->r_offset += count;
1262 else if (stop > addr
1264 && (start <= addr || start >= toaddr))
1265 irel->r_offset -= count;
1269 if (irel->r_type == R_SH_SWITCH16)
1270 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1271 else if (irel->r_type == R_SH_SWITCH8)
1272 voff = bfd_get_8 (abfd, contents + nraddr);
1274 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1275 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1280 start = irel->r_vaddr - sec->vma;
1281 stop = (bfd_vma) ((bfd_signed_vma) start
1282 + (long) irel->r_offset
1289 && (stop <= addr || stop >= toaddr))
1291 else if (stop > addr
1293 && (start <= addr || start >= toaddr))
1302 switch (irel->r_type)
1308 case R_SH_PCDISP8BY2:
1309 case R_SH_PCRELIMM8BY2:
1311 if ((oinsn & 0xff00) != (insn & 0xff00))
1313 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1318 if ((oinsn & 0xf000) != (insn & 0xf000))
1320 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1323 case R_SH_PCRELIMM8BY4:
1324 BFD_ASSERT (adjust == count || count >= 4);
1329 if ((irel->r_vaddr & 3) == 0)
1332 if ((oinsn & 0xff00) != (insn & 0xff00))
1334 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1339 if (voff < 0 || voff >= 0xff)
1341 bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1346 if (voff < - 0x8000 || voff >= 0x8000)
1348 bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1353 bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1357 irel->r_offset += adjust;
1363 ((*_bfd_error_handler)
1364 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
1365 abfd, (unsigned long) irel->r_vaddr));
1366 bfd_set_error (bfd_error_bad_value);
1371 irel->r_vaddr = nraddr + sec->vma;
  /* Look through all the other sections.  If they contain any IMM32
1375 relocs against internal symbols which we are not going to adjust
1376 below, we may need to adjust the addends. */
1377 for (o = abfd->sections; o != NULL; o = o->next)
1379 struct internal_reloc *internal_relocs;
1380 struct internal_reloc *irelscan, *irelscanend;
1381 bfd_byte *ocontents;
1384 || (o->flags & SEC_RELOC) == 0
1385 || o->reloc_count == 0)
1388 /* We always cache the relocs. Perhaps, if info->keep_memory is
1389 FALSE, we should free them, if we are permitted to, when we
1390 leave sh_coff_relax_section. */
1391 internal_relocs = (_bfd_coff_read_internal_relocs
1392 (abfd, o, TRUE, (bfd_byte *) NULL, FALSE,
1393 (struct internal_reloc *) NULL));
1394 if (internal_relocs == NULL)
1398 irelscanend = internal_relocs + o->reloc_count;
1399 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1401 struct internal_syment sym;
1404 if (irelscan->r_type != R_SH_IMM32
1405 && irelscan->r_type != R_SH_IMAGEBASE
1406 && irelscan->r_type != R_SH_IMM32CE)
1408 if (irelscan->r_type != R_SH_IMM32)
1412 bfd_coff_swap_sym_in (abfd,
1413 ((bfd_byte *) obj_coff_external_syms (abfd)
1414 + (irelscan->r_symndx
1415 * bfd_coff_symesz (abfd))),
1417 if (sym.n_sclass != C_EXT
1418 && sym.n_scnum == sec->target_index
1419 && ((bfd_vma) sym.n_value <= addr
1420 || (bfd_vma) sym.n_value >= toaddr))
1424 if (ocontents == NULL)
1426 if (coff_section_data (abfd, o)->contents != NULL)
1427 ocontents = coff_section_data (abfd, o)->contents;
1430 if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
1432 /* We always cache the section contents.
1433 Perhaps, if info->keep_memory is FALSE, we
1434 should free them, if we are permitted to,
1435 when we leave sh_coff_relax_section. */
1436 coff_section_data (abfd, o)->contents = ocontents;
1440 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1442 if (val > addr && val < toaddr)
1443 bfd_put_32 (abfd, val - count,
1444 ocontents + irelscan->r_vaddr - o->vma);
1446 coff_section_data (abfd, o)->keep_contents = TRUE;
1451 /* Adjusting the internal symbols will not work if something has
1452 already retrieved the generic symbols. It would be possible to
1453 make this work by adjusting the generic symbols at the same time.
1454 However, this case should not arise in normal usage. */
1455 if (obj_symbols (abfd) != NULL
1456 || obj_raw_syments (abfd) != NULL)
1458 ((*_bfd_error_handler)
1459 ("%B: fatal: generic symbols retrieved before relaxing", abfd));
1460 bfd_set_error (bfd_error_invalid_operation);
1464 /* Adjust all the symbols. */
1465 sym_hash = obj_coff_sym_hashes (abfd);
1466 symesz = bfd_coff_symesz (abfd);
1467 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1468 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1469 while (esym < esymend)
1471 struct internal_syment isym;
1473 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1475 if (isym.n_scnum == sec->target_index
1476 && (bfd_vma) isym.n_value > addr
1477 && (bfd_vma) isym.n_value < toaddr)
1479 isym.n_value -= count;
1481 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1483 if (*sym_hash != NULL)
1485 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1486 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1487 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1488 && (*sym_hash)->root.u.def.value < toaddr);
1489 (*sym_hash)->root.u.def.value -= count;
1493 esym += (isym.n_numaux + 1) * symesz;
1494 sym_hash += isym.n_numaux + 1;
1497 /* See if we can move the ALIGN reloc forward. We have adjusted
1498 r_vaddr for it already. */
1499 if (irelalign != NULL)
1501 bfd_vma alignto, alignaddr;
1503 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1504 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1505 1 << irelalign->r_offset);
1506 if (alignto != alignaddr)
1508 /* Tail recursion. */
1509 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1510 (int) (alignto - alignaddr));
1517 /* This is yet another version of the SH opcode table, used to rapidly
1518 get information about a particular instruction. */
1520 /* The opcode map is represented by an array of these structures. The
   array is indexed by the high order four bits in the instruction.  */
1523 struct sh_major_opcode
1525 /* A pointer to the instruction list. This is an array which
1526 contains all the instructions with this major opcode. */
1527 const struct sh_minor_opcode *minor_opcodes;
1528 /* The number of elements in minor_opcodes. */
1529 unsigned short count;
1532 /* This structure holds information for a set of SH opcodes. The
1533 instruction code is anded with the mask value, and the resulting
   value is used to search the sorted opcode list.  */
1536 struct sh_minor_opcode
1538 /* The sorted opcode list. */
1539 const struct sh_opcode *opcodes;
1540 /* The number of elements in opcodes. */
1541 unsigned short count;
1542 /* The mask value to use when searching the opcode list. */
1543 unsigned short mask;
1546 /* This structure holds information for an SH instruction. An array
   of these structures is sorted in order by opcode.  */
1551 /* The code for this instruction, after it has been anded with the
1552 mask value in the sh_major_opcode structure. */
1553 unsigned short opcode;
1554 /* Flags for this instruction. */
1555 unsigned long flags;
/* Flags which appear in the sh_opcode structure.  */
1560 /* This instruction loads a value from memory. */
1563 /* This instruction stores a value to memory. */
1566 /* This instruction is a branch. */
1567 #define BRANCH (0x4)
1569 /* This instruction has a delay slot. */
1572 /* This instruction uses the value in the register in the field at
1573 mask 0x0f00 of the instruction. */
1574 #define USES1 (0x10)
1575 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1577 /* This instruction uses the value in the register in the field at
1578 mask 0x00f0 of the instruction. */
1579 #define USES2 (0x20)
1580 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1582 /* This instruction uses the value in register 0. */
1583 #define USESR0 (0x40)
1585 /* This instruction sets the value in the register in the field at
1586 mask 0x0f00 of the instruction. */
1587 #define SETS1 (0x80)
1588 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1590 /* This instruction sets the value in the register in the field at
1591 mask 0x00f0 of the instruction. */
1592 #define SETS2 (0x100)
1593 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1595 /* This instruction sets register 0. */
1596 #define SETSR0 (0x200)
1598 /* This instruction sets a special register. */
1599 #define SETSSP (0x400)
1601 /* This instruction uses a special register. */
1602 #define USESSP (0x800)
1604 /* This instruction uses the floating point register in the field at
1605 mask 0x0f00 of the instruction. */
1606 #define USESF1 (0x1000)
1607 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1609 /* This instruction uses the floating point register in the field at
1610 mask 0x00f0 of the instruction. */
1611 #define USESF2 (0x2000)
1612 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1614 /* This instruction uses floating point register 0. */
1615 #define USESF0 (0x4000)
1617 /* This instruction sets the floating point register in the field at
1618 mask 0x0f00 of the instruction. */
1619 #define SETSF1 (0x8000)
1620 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1622 #define USESAS (0x10000)
1623 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1624 #define USESR8 (0x20000)
1625 #define SETSAS (0x40000)
1626 #define SETSAS_REG(x) USESAS_REG (x)
1628 #define MAP(a) a, sizeof a / sizeof a[0]
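/* For illustration only (an assumed example, not part of this file):
   the table entry { 0x600c, SETS1 | USES2 } below describes
   "extu.b rm,rn".  Matched against the concrete instruction 0x612c
   (extu.b r2,r1), the accessor macros above give
       SETS1_REG (0x612c) == 1	-- the insn sets r1
       USES2_REG (0x612c) == 2	-- the insn reads r2
   which is the information sh_insn_uses_reg and sh_insn_sets_reg rely
   on when deciding whether two instructions may be swapped to align a
   load or store.  */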
1630 #ifndef COFF_IMAGE_WITH_PE
1631 static bfd_boolean sh_insn_uses_reg
1632 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1633 static bfd_boolean sh_insn_sets_reg
1634 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1635 static bfd_boolean sh_insn_uses_or_sets_reg
1636 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1637 static bfd_boolean sh_insn_uses_freg
1638 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1639 static bfd_boolean sh_insn_sets_freg
1640 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1641 static bfd_boolean sh_insn_uses_or_sets_freg
1642 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1643 static bfd_boolean sh_insns_conflict
1644 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1645 const struct sh_opcode *));
1646 static bfd_boolean sh_load_use
1647 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1648 const struct sh_opcode *));
1650 /* The opcode maps. */
1652 static const struct sh_opcode sh_opcode00[] =
1654 { 0x0008, SETSSP }, /* clrt */
1655 { 0x0009, 0 }, /* nop */
1656 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1657 { 0x0018, SETSSP }, /* sett */
1658 { 0x0019, SETSSP }, /* div0u */
1659 { 0x001b, 0 }, /* sleep */
1660 { 0x0028, SETSSP }, /* clrmac */
1661 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1662 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1663 { 0x0048, SETSSP }, /* clrs */
1664 { 0x0058, SETSSP } /* sets */
1667 static const struct sh_opcode sh_opcode01[] =
1669 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1670 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1671 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1672 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1673 { 0x0029, SETS1 | USESSP }, /* movt rn */
1674 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1675 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1676 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn / sts dsr,rn */
1677 { 0x0083, LOAD | USES1 }, /* pref @rn */
1678 { 0x007a, SETS1 | USESSP }, /* sts a0,rn */
1679 { 0x008a, SETS1 | USESSP }, /* sts x0,rn */
1680 { 0x009a, SETS1 | USESSP }, /* sts x1,rn */
1681 { 0x00aa, SETS1 | USESSP }, /* sts y0,rn */
1682 { 0x00ba, SETS1 | USESSP } /* sts y1,rn */
1685 static const struct sh_opcode sh_opcode02[] =
1687 { 0x0002, SETS1 | USESSP }, /* stc <special_reg>,rn */
1688 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1689 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1690 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1691 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1692 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1693 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1694 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1695 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1698 static const struct sh_minor_opcode sh_opcode0[] =
1700 { MAP (sh_opcode00), 0xffff },
1701 { MAP (sh_opcode01), 0xf0ff },
1702 { MAP (sh_opcode02), 0xf00f }
1705 static const struct sh_opcode sh_opcode10[] =
1707 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1710 static const struct sh_minor_opcode sh_opcode1[] =
1712 { MAP (sh_opcode10), 0xf000 }
1715 static const struct sh_opcode sh_opcode20[] =
1717 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1718 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1719 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1720 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1721 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1722 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1723 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1724 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1725 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1726 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1727 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1728 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1729 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1730 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1731 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1734 static const struct sh_minor_opcode sh_opcode2[] =
1736 { MAP (sh_opcode20), 0xf00f }
1739 static const struct sh_opcode sh_opcode30[] =
1741 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1742 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1743 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1744 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1745 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1746 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1747 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1748 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1749 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1750 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1751 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1752 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1753 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1754 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1757 static const struct sh_minor_opcode sh_opcode3[] =
1759 { MAP (sh_opcode30), 0xf00f }
1762 static const struct sh_opcode sh_opcode40[] =
1764 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1765 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1766 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1767 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1768 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1769 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1770 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1771 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1772 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1773 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1774 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1775 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1776 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1777 { 0x4014, SETSSP | USES1 }, /* setrc rm */
1778 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1779 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1780 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1781 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1782 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1783 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1784 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1785 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1786 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1787 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1788 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1789 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1790 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1791 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1792 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1793 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1794 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1795 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1796 { 0x405a, SETSSP | USES1 }, /* lds.l rm,fpul */
1797 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr / dsr,@-rn */
1798 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr / dsr */
1799 { 0x406a, SETSSP | USES1 }, /* lds rm,fpscr / lds rm,dsr */
1800 { 0x4072, STORE | SETS1 | USES1 | USESSP }, /* sts.l a0,@-rn */
1801 { 0x4076, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,a0 */
1802 { 0x407a, SETSSP | USES1 }, /* lds.l rm,a0 */
1803 { 0x4082, STORE | SETS1 | USES1 | USESSP }, /* sts.l x0,@-rn */
1804 { 0x4086, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x0 */
1805 { 0x408a, SETSSP | USES1 }, /* lds.l rm,x0 */
1806 { 0x4092, STORE | SETS1 | USES1 | USESSP }, /* sts.l x1,@-rn */
1807 { 0x4096, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x1 */
1808 { 0x409a, SETSSP | USES1 }, /* lds.l rm,x1 */
1809 { 0x40a2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y0,@-rn */
1810 { 0x40a6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y0 */
1811 { 0x40aa, SETSSP | USES1 }, /* lds.l rm,y0 */
1812 { 0x40b2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y1,@-rn */
1813 { 0x40b6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y1 */
1814 { 0x40ba, SETSSP | USES1 } /* lds.l rm,y1 */
1817 static const struct sh_opcode sh_opcode41[] =
1819 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l <special_reg>,@-rn */
1820 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,<special_reg> */
1821 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1822 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1823 { 0x400e, SETSSP | USES1 }, /* ldc rm,<special_reg> */
1824 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1827 static const struct sh_minor_opcode sh_opcode4[] =
1829 { MAP (sh_opcode40), 0xf0ff },
1830 { MAP (sh_opcode41), 0xf00f }
1833 static const struct sh_opcode sh_opcode50[] =
1835 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1838 static const struct sh_minor_opcode sh_opcode5[] =
1840 { MAP (sh_opcode50), 0xf000 }
1843 static const struct sh_opcode sh_opcode60[] =
1845 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1846 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1847 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1848 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1849 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1850 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1851 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1852 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1853 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1854 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1855 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1856 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1857 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1858 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1859 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1860 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1863 static const struct sh_minor_opcode sh_opcode6[] =
1865 { MAP (sh_opcode60), 0xf00f }
1868 static const struct sh_opcode sh_opcode70[] =
1870 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1873 static const struct sh_minor_opcode sh_opcode7[] =
1875 { MAP (sh_opcode70), 0xf000 }
1878 static const struct sh_opcode sh_opcode80[] =
1880 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1881 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1882 { 0x8200, SETSSP }, /* setrc #imm */
1883 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1884 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rn),r0 */
1885 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1886 { 0x8900, BRANCH | USESSP }, /* bt label */
1887 { 0x8b00, BRANCH | USESSP }, /* bf label */
1888 { 0x8c00, SETSSP }, /* ldrs @(disp,pc) */
1889 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1890 { 0x8e00, SETSSP }, /* ldre @(disp,pc) */
1891 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1894 static const struct sh_minor_opcode sh_opcode8[] =
1896 { MAP (sh_opcode80), 0xff00 }
1899 static const struct sh_opcode sh_opcode90[] =
1901 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1904 static const struct sh_minor_opcode sh_opcode9[] =
1906 { MAP (sh_opcode90), 0xf000 }
1909 static const struct sh_opcode sh_opcodea0[] =
1911 { 0xa000, BRANCH | DELAY } /* bra label */
static const struct sh_minor_opcode sh_opcodea[] =
{
  { MAP (sh_opcodea0), 0xf000 }
};

static const struct sh_opcode sh_opcodeb0[] =
{
  { 0xb000, BRANCH | DELAY }			/* bsr label */
};

static const struct sh_minor_opcode sh_opcodeb[] =
{
  { MAP (sh_opcodeb0), 0xf000 }
};

static const struct sh_opcode sh_opcodec0[] =
{
  { 0xc000, STORE | USESR0 | USESSP },		/* mov.b r0,@(disp,gbr) */
  { 0xc100, STORE | USESR0 | USESSP },		/* mov.w r0,@(disp,gbr) */
  { 0xc200, STORE | USESR0 | USESSP },		/* mov.l r0,@(disp,gbr) */
  { 0xc300, BRANCH | USESSP },			/* trapa #imm */
  { 0xc400, LOAD | SETSR0 | USESSP },		/* mov.b @(disp,gbr),r0 */
  { 0xc500, LOAD | SETSR0 | USESSP },		/* mov.w @(disp,gbr),r0 */
  { 0xc600, LOAD | SETSR0 | USESSP },		/* mov.l @(disp,gbr),r0 */
  { 0xc700, SETSR0 },				/* mova @(disp,pc),r0 */
  { 0xc800, SETSSP | USESR0 },			/* tst #imm,r0 */
  { 0xc900, SETSR0 | USESR0 },			/* and #imm,r0 */
  { 0xca00, SETSR0 | USESR0 },			/* xor #imm,r0 */
  { 0xcb00, SETSR0 | USESR0 },			/* or #imm,r0 */
  { 0xcc00, LOAD | SETSSP | USESR0 | USESSP },	/* tst.b #imm,@(r0,gbr) */
  { 0xcd00, LOAD | STORE | USESR0 | USESSP },	/* and.b #imm,@(r0,gbr) */
  { 0xce00, LOAD | STORE | USESR0 | USESSP },	/* xor.b #imm,@(r0,gbr) */
  { 0xcf00, LOAD | STORE | USESR0 | USESSP }	/* or.b #imm,@(r0,gbr) */
};

static const struct sh_minor_opcode sh_opcodec[] =
{
  { MAP (sh_opcodec0), 0xff00 }
};

static const struct sh_opcode sh_opcoded0[] =
{
  { 0xd000, LOAD | SETS1 }			/* mov.l @(disp,pc),rn */
};

static const struct sh_minor_opcode sh_opcoded[] =
{
  { MAP (sh_opcoded0), 0xf000 }
};

static const struct sh_opcode sh_opcodee0[] =
{
  { 0xe000, SETS1 }				/* mov #imm,rn */
};

static const struct sh_minor_opcode sh_opcodee[] =
{
  { MAP (sh_opcodee0), 0xf000 }
};

static const struct sh_opcode sh_opcodef0[] =
{
  { 0xf000, SETSF1 | USESF1 | USESF2 },		/* fadd fm,fn */
  { 0xf001, SETSF1 | USESF1 | USESF2 },		/* fsub fm,fn */
  { 0xf002, SETSF1 | USESF1 | USESF2 },		/* fmul fm,fn */
  { 0xf003, SETSF1 | USESF1 | USESF2 },		/* fdiv fm,fn */
  { 0xf004, SETSSP | USESF1 | USESF2 },		/* fcmp/eq fm,fn */
  { 0xf005, SETSSP | USESF1 | USESF2 },		/* fcmp/gt fm,fn */
  { 0xf006, LOAD | SETSF1 | USES2 | USESR0 },	/* fmov.s @(r0,rm),fn */
  { 0xf007, STORE | USES1 | USESF2 | USESR0 },	/* fmov.s fm,@(r0,rn) */
  { 0xf008, LOAD | SETSF1 | USES2 },		/* fmov.s @rm,fn */
  { 0xf009, LOAD | SETS2 | SETSF1 | USES2 },	/* fmov.s @rm+,fn */
  { 0xf00a, STORE | USES1 | USESF2 },		/* fmov.s fm,@rn */
  { 0xf00b, STORE | SETS1 | USES1 | USESF2 },	/* fmov.s fm,@-rn */
  { 0xf00c, SETSF1 | USESF2 },			/* fmov fm,fn */
  { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 }	/* fmac f0,fm,fn */
};

static const struct sh_opcode sh_opcodef1[] =
{
  { 0xf00d, SETSF1 | USESSP },		/* fsts fpul,fn */
  { 0xf01d, SETSSP | USESF1 },		/* flds fn,fpul */
  { 0xf02d, SETSF1 | USESSP },		/* float fpul,fn */
  { 0xf03d, SETSSP | USESF1 },		/* ftrc fn,fpul */
  { 0xf04d, SETSF1 | USESF1 },		/* fneg fn */
  { 0xf05d, SETSF1 | USESF1 },		/* fabs fn */
  { 0xf06d, SETSF1 | USESF1 },		/* fsqrt fn */
  { 0xf07d, SETSSP | USESF1 },		/* ftst/nan fn */
  { 0xf08d, SETSF1 },			/* fldi0 fn */
  { 0xf09d, SETSF1 }			/* fldi1 fn */
};

static const struct sh_minor_opcode sh_opcodef[] =
{
  { MAP (sh_opcodef0), 0xf00f },
  { MAP (sh_opcodef1), 0xf0ff }
};

static struct sh_major_opcode sh_opcodes[] =
{
  { MAP (sh_opcode0) },
  { MAP (sh_opcode1) },
  { MAP (sh_opcode2) },
  { MAP (sh_opcode3) },
  { MAP (sh_opcode4) },
  { MAP (sh_opcode5) },
  { MAP (sh_opcode6) },
  { MAP (sh_opcode7) },
  { MAP (sh_opcode8) },
  { MAP (sh_opcode9) },
  { MAP (sh_opcodea) },
  { MAP (sh_opcodeb) },
  { MAP (sh_opcodec) },
  { MAP (sh_opcoded) },
  { MAP (sh_opcodee) },
  { MAP (sh_opcodef) }
};

/* The double data transfer / parallel processing insns are not
   described here.  This will cause sh_align_load_span to leave them alone.  */

static const struct sh_opcode sh_dsp_opcodef0[] =
{
  { 0xf400, USESAS | SETSAS | LOAD | SETSSP },		/* movs.x @-as,ds */
  { 0xf401, USESAS | SETSAS | STORE | USESSP },		/* movs.x ds,@-as */
  { 0xf404, USESAS | LOAD | SETSSP },			/* movs.x @as,ds */
  { 0xf405, USESAS | STORE | USESSP },			/* movs.x ds,@as */
  { 0xf408, USESAS | SETSAS | LOAD | SETSSP },		/* movs.x @as+,ds */
  { 0xf409, USESAS | SETSAS | STORE | USESSP },		/* movs.x ds,@as+ */
  { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 },	/* movs.x @as+r8,ds */
  { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 }	/* movs.x ds,@as+r8 */
};

static const struct sh_minor_opcode sh_dsp_opcodef[] =
{
  { MAP (sh_dsp_opcodef0), 0xfc0d }
};
/* Given an instruction, return a pointer to the corresponding
   sh_opcode structure.  Return NULL if the instruction is not
   recognized.  */

static const struct sh_opcode *
sh_insn_info (insn)
     unsigned int insn;
{
  const struct sh_major_opcode *maj;
  const struct sh_minor_opcode *min, *minend;

  maj = &sh_opcodes[(insn & 0xf000) >> 12];
  min = maj->minor_opcodes;
  minend = min + maj->count;
  for (; min < minend; min++)
    {
      unsigned int l;
      const struct sh_opcode *op, *opend;

      l = insn & min->mask;
      op = min->opcodes;
      opend = op + min->count;

      /* Since the opcodes tables are sorted, we could use a binary
         search here if the count were above some cutoff value.  */
      for (; op < opend; op++)
        if (op->opcode == l)
          return op;
    }

  return NULL;
}
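/* A minimal sketch of how this lookup is used elsewhere in this file:
   decode a raw 16-bit instruction word and test the flag bits.  The
   opcode value and the helper name below are only illustrative.  */
#if 0
static bfd_boolean
sh_insn_is_load_or_store_example (unsigned int insn)
{
  const struct sh_opcode *op = sh_insn_info (insn);

  /* Unrecognized instructions are left alone by the alignment code.  */
  if (op == NULL)
    return FALSE;

  return (op->flags & (LOAD | STORE)) != 0;
}
#endif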
/* See whether an instruction uses or sets a general purpose register.  */

static bfd_boolean
sh_insn_uses_or_sets_reg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  if (sh_insn_uses_reg (insn, op, reg))
    return TRUE;

  return sh_insn_sets_reg (insn, op, reg);
}
/* See whether an instruction uses a general purpose register.  */

static bfd_boolean
sh_insn_uses_reg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  unsigned int f;

  f = op->flags;

  if ((f & USES1) != 0
      && USES1_REG (insn) == reg)
    return TRUE;
  if ((f & USES2) != 0
      && USES2_REG (insn) == reg)
    return TRUE;
  if ((f & USESR0) != 0
      && reg == 0)
    return TRUE;
  if ((f & USESAS) && reg == USESAS_REG (insn))
    return TRUE;
  if ((f & USESR8) && reg == 8)
    return TRUE;

  return FALSE;
}
/* See whether an instruction sets a general purpose register.  */

static bfd_boolean
sh_insn_sets_reg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  unsigned int f;

  f = op->flags;

  if ((f & SETS1) != 0
      && SETS1_REG (insn) == reg)
    return TRUE;
  if ((f & SETS2) != 0
      && SETS2_REG (insn) == reg)
    return TRUE;
  if ((f & SETSR0) != 0
      && reg == 0)
    return TRUE;
  if ((f & SETSAS) && reg == SETSAS_REG (insn))
    return TRUE;

  return FALSE;
}
/* See whether an instruction uses or sets a floating point register.  */

static bfd_boolean
sh_insn_uses_or_sets_freg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  if (sh_insn_uses_freg (insn, op, reg))
    return TRUE;

  return sh_insn_sets_freg (insn, op, reg);
}
/* See whether an instruction uses a floating point register.  */

static bfd_boolean
sh_insn_uses_freg (insn, op, freg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int freg;
{
  unsigned int f;

  f = op->flags;

  /* We can't tell whether this is a double-precision insn, so just play
     safe and assume that it might be.  That means we must compare not
     only FREG against itself, but also an even FREG against FREG+1 (the
     using insn may touch only the low part of a double precision value)
     and an odd FREG against FREG-1 (the setting insn may set only the
     low part of a double precision value).
     What this all boils down to is that we have to ignore the lowest
     bit of the register number.  */

  if ((f & USESF1) != 0
      && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
    return TRUE;
  if ((f & USESF2) != 0
      && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
    return TRUE;
  if ((f & USESF0) != 0
      && freg == 0)
    return TRUE;

  return FALSE;
}
/* See whether an instruction sets a floating point register.  */

static bfd_boolean
sh_insn_sets_freg (insn, op, freg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int freg;
{
  unsigned int f;

  f = op->flags;

  /* We can't tell whether this is a double-precision insn, so just play
     safe and assume that it might be.  That means we must compare not
     only FREG against itself, but also an even FREG against FREG+1 (the
     using insn may touch only the low part of a double precision value)
     and an odd FREG against FREG-1 (the setting insn may set only the
     low part of a double precision value).
     What this all boils down to is that we have to ignore the lowest
     bit of the register number.  */

  if ((f & SETSF1) != 0
      && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
    return TRUE;

  return FALSE;
}
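/* A worked example of the masking above, with illustrative values: for
   insn 0xf45c ("fmov f5,f4"), SETSF1_REG (insn) is 4, so the test
   reports a hit both for FREG 4 and for FREG 5, because
   (4 & 0xe) == (5 & 0xe).  If the instruction really moves a
   double-precision pair, f5 is the other half of the destination, so
   the conservative answer is the safe one.  */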
/* See whether instructions I1 and I2 conflict, assuming I1 comes
   before I2.  OP1 and OP2 are the corresponding sh_opcode structures.
   This should return TRUE if there is a conflict, or FALSE if the
   instructions can be swapped safely.  */

static bfd_boolean
sh_insns_conflict (i1, op1, i2, op2)
     unsigned int i1;
     const struct sh_opcode *op1;
     unsigned int i2;
     const struct sh_opcode *op2;
{
  unsigned int f1, f2;

  f1 = op1->flags;
  f2 = op2->flags;

  /* Load of fpscr conflicts with floating point operations.
     FIXME: shouldn't test raw opcodes here.  */
  if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
      || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
    return TRUE;

  if ((f1 & (BRANCH | DELAY)) != 0
      || (f2 & (BRANCH | DELAY)) != 0)
    return TRUE;

  if (((f1 | f2) & SETSSP)
      && (f1 & (SETSSP | USESSP))
      && (f2 & (SETSSP | USESSP)))
    return TRUE;

  if ((f1 & SETS1) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
    return TRUE;
  if ((f1 & SETS2) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
    return TRUE;
  if ((f1 & SETSR0) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, 0))
    return TRUE;
  if ((f1 & SETSAS)
      && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
    return TRUE;
  if ((f1 & SETSF1) != 0
      && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
    return TRUE;

  if ((f2 & SETS1) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
    return TRUE;
  if ((f2 & SETS2) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
    return TRUE;
  if ((f2 & SETSR0) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, 0))
    return TRUE;
  if ((f2 & SETSAS)
      && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
    return TRUE;
  if ((f2 & SETSF1) != 0
      && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
    return TRUE;

  /* The instructions do not conflict.  */
  return FALSE;
}
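/* For example, using two entries from the tables above: i1 = 0xc700
   ("mova @(disp,pc),r0", SETSR0) and i2 = 0xc900 ("and #imm,r0",
   SETSR0 | USESR0) conflict, because i1 sets r0 and i2 uses it, so
   sh_insns_conflict returns TRUE and the pair will not be swapped.  */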
/* I1 is a load instruction, and I2 is some other instruction.  Return
   TRUE if I1 loads a register which I2 uses.  */

static bfd_boolean
sh_load_use (i1, op1, i2, op2)
     unsigned int i1;
     const struct sh_opcode *op1;
     unsigned int i2;
     const struct sh_opcode *op2;
{
  unsigned int f1;

  f1 = op1->flags;

  if ((f1 & LOAD) == 0)
    return FALSE;

  /* If both SETS1 and SETSSP are set, that means a load to a special
     register using postincrement addressing mode, which we don't care
     about here.  */
  if ((f1 & SETS1) != 0
      && (f1 & SETSSP) == 0
      && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
    return TRUE;

  if ((f1 & SETSR0) != 0
      && sh_insn_uses_reg (i2, op2, 0))
    return TRUE;

  if ((f1 & SETSF1) != 0
      && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
    return TRUE;

  return FALSE;
}
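/* A minimal sketch using two opcodes from the tables above: 0xc600
   ("mov.l @(disp,gbr),r0") loads r0 and 0xc800 ("tst #imm,r0") reads
   r0, so placing the tst immediately after the load would stall the
   pipeline.  The helper name below is illustrative only.  */
#if 0
static bfd_boolean
sh_load_use_example (void)
{
  const struct sh_opcode *ld = sh_insn_info (0xc600);
  const struct sh_opcode *use = sh_insn_info (0xc800);

  /* Expected to return TRUE: the load sets r0 and the tst uses it.  */
  return sh_load_use (0xc600, ld, 0xc800, use);
}
#endif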
/* Try to align loads and stores within a span of memory.  This is
   called by both the ELF and the COFF sh targets.  ABFD and SEC are
   the BFD and section we are examining.  CONTENTS is the contents of
   the section.  SWAP is the routine to call to swap two instructions.
   RELOCS is a pointer to the internal relocation information, to be
   passed to SWAP.  PLABEL is a pointer to the current label in a
   sorted list of labels; LABEL_END is the end of the list.  START and
   STOP are the range of memory to examine.  If a swap is made,
   *PSWAPPED is set to TRUE.  */

#ifdef COFF_WITH_PE
static
#endif
bfd_boolean
_bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
                         plabel, label_end, start, stop, pswapped)
     bfd *abfd;
     asection *sec;
     bfd_byte *contents;
     bfd_boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
     PTR relocs;
     bfd_vma **plabel;
     bfd_vma *label_end;
     bfd_vma start;
     bfd_vma stop;
     bfd_boolean *pswapped;
{
  int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
             || abfd->arch_info->mach == bfd_mach_sh3_dsp);
  bfd_vma i;

  /* The SH4 has a Harvard architecture, hence aligning loads is not
     desirable.  In fact, it is counter-productive, since it interferes
     with the schedules generated by the compiler.  */
  if (abfd->arch_info->mach == bfd_mach_sh4)
    return TRUE;

  /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
     instructions.  */
  if (dsp)
    {
      sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
      sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef[0];
    }

  /* Instructions should be aligned on 2 byte boundaries.  */
  if ((start & 1) == 1)
    ++start;

  /* Now look through the unaligned addresses.  */
  i = start;
  if ((i & 2) == 0)
    i += 2;
  for (; i < stop; i += 4)
    {
      unsigned int insn;
      const struct sh_opcode *op;
      unsigned int prev_insn = 0;
      const struct sh_opcode *prev_op = NULL;

      insn = bfd_get_16 (abfd, contents + i);
      op = sh_insn_info (insn);
      if (op == NULL
          || (op->flags & (LOAD | STORE)) == 0)
        continue;

      /* This is a load or store which is not on a four byte boundary.  */

      while (*plabel < label_end && **plabel < i)
        ++*plabel;

      if (i > start)
        {
          prev_insn = bfd_get_16 (abfd, contents + i - 2);
          /* If INSN is the field b of a parallel processing insn, it is not
             a load / store after all.  Note that the test here might mistake
             the field_b of a pcopy insn for the starting code of a parallel
             processing insn; this might miss a swapping opportunity, but at
             least we're on the safe side.  */
          if (dsp && (prev_insn & 0xfc00) == 0xf800)
            continue;

          /* Check if prev_insn is actually the field b of a parallel
             processing insn.  Again, this can give a spurious match
             after a pcopy.  */
          if (dsp && i - 2 > start)
            {
              unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);

              if ((pprev_insn & 0xfc00) == 0xf800)
                prev_op = NULL;
              else
                prev_op = sh_insn_info (prev_insn);
            }
          else
            prev_op = sh_insn_info (prev_insn);

          /* If the load/store instruction is in a delay slot, we
             can't swap.  */
          if (prev_op == NULL
              || (prev_op->flags & DELAY) != 0)
            continue;
        }
      if (i > start
          && (*plabel >= label_end || **plabel != i)
          && prev_op != NULL
          && (prev_op->flags & (LOAD | STORE)) == 0
          && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
        {
          bfd_boolean ok;

          /* The load/store instruction does not have a label, and
             there is a previous instruction; PREV_INSN is not
             itself a load/store instruction, and PREV_INSN and
             INSN do not conflict.  */

          ok = TRUE;

          if (i >= start + 4)
            {
              unsigned int prev2_insn;
              const struct sh_opcode *prev2_op;

              prev2_insn = bfd_get_16 (abfd, contents + i - 4);
              prev2_op = sh_insn_info (prev2_insn);

              /* If the instruction before PREV_INSN has a delay
                 slot--that is, PREV_INSN is in a delay slot--we
                 can not swap.  */
              if (prev2_op == NULL
                  || (prev2_op->flags & DELAY) != 0)
                ok = FALSE;

              /* If the instruction before PREV_INSN is a load,
                 and it sets a register which INSN uses, then
                 putting INSN immediately after PREV_INSN will
                 cause a pipeline bubble, so there is no point to
                 making the swap.  */
              if (ok
                  && (prev2_op->flags & LOAD) != 0
                  && sh_load_use (prev2_insn, prev2_op, insn, op))
                ok = FALSE;
            }

          if (ok)
            {
              if (! (*swap) (abfd, sec, relocs, contents, i - 2))
                return FALSE;
              *pswapped = TRUE;
              continue;
            }
        }

      while (*plabel < label_end && **plabel < i + 2)
        ++*plabel;

      if (i + 2 < stop
          && (*plabel >= label_end || **plabel != i + 2))
        {
          unsigned int next_insn;
          const struct sh_opcode *next_op;

          /* There is an instruction after the load/store
             instruction, and it does not have a label.  */
          next_insn = bfd_get_16 (abfd, contents + i + 2);
          next_op = sh_insn_info (next_insn);
          if (next_op != NULL
              && (next_op->flags & (LOAD | STORE)) == 0
              && ! sh_insns_conflict (insn, op, next_insn, next_op))
            {
              bfd_boolean ok;

              /* NEXT_INSN is not itself a load/store instruction,
                 and it does not conflict with INSN.  */

              ok = TRUE;

              /* If PREV_INSN is a load, and it sets a register
                 which NEXT_INSN uses, then putting NEXT_INSN
                 immediately after PREV_INSN will cause a pipeline
                 bubble, so there is no reason to make this swap.  */
              if (prev_op != NULL
                  && (prev_op->flags & LOAD) != 0
                  && sh_load_use (prev_insn, prev_op, next_insn, next_op))
                ok = FALSE;

              /* If INSN is a load, and it sets a register which
                 the insn after NEXT_INSN uses, then doing the
                 swap will cause a pipeline bubble, so there is no
                 reason to make the swap.  However, if the insn
                 after NEXT_INSN is itself a load or store
                 instruction, then it is misaligned, so
                 optimistically hope that it will be swapped
                 itself, and just live with the pipeline bubble if
                 it isn't.  */
              if (ok
                  && i + 4 < stop
                  && (op->flags & LOAD) != 0)
                {
                  unsigned int next2_insn;
                  const struct sh_opcode *next2_op;

                  next2_insn = bfd_get_16 (abfd, contents + i + 4);
                  next2_op = sh_insn_info (next2_insn);
                  if (next2_op == NULL
                      || ((next2_op->flags & (LOAD | STORE)) == 0
                          && sh_load_use (insn, op, next2_insn, next2_op)))
                    ok = FALSE;
                }

              if (ok)
                {
                  if (! (*swap) (abfd, sec, relocs, contents, i))
                    return FALSE;
                  *pswapped = TRUE;
                  continue;
                }
            }
        }
    }

  return TRUE;
}
#endif /* not COFF_IMAGE_WITH_PE */
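/* An illustration of the transformation performed above (addresses and
   instructions are only examples): with a span starting at a four byte
   boundary,

     0x00  mov #1,r3                 0x00  mov.l @(disp,gbr),r0
     0x02  mov.l @(disp,gbr),r0  =>  0x02  mov #1,r3

   the load is swapped onto the four byte boundary; the mov #imm
   neither loads nor stores and does not touch r0, so the two
   instructions do not conflict and the swap is safe.  */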
/* Look for loads and stores which we can align to four byte
   boundaries.  See the longer comment above sh_relax_section for why
   this is desirable.  This sets *PSWAPPED if some instruction was
   swapped.  */

static bfd_boolean
sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
     bfd *abfd;
     asection *sec;
     struct internal_reloc *internal_relocs;
     bfd_byte *contents;
     bfd_boolean *pswapped;
{
  struct internal_reloc *irel, *irelend;
  bfd_vma *labels = NULL;
  bfd_vma *label, *label_end;
  bfd_size_type amt;

  *pswapped = FALSE;

  irelend = internal_relocs + sec->reloc_count;

  /* Get all the addresses with labels on them.  */
  amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
  labels = (bfd_vma *) bfd_malloc (amt);
  if (labels == NULL)
    goto error_return;
  label_end = labels;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      if (irel->r_type == R_SH_LABEL)
        {
          *label_end = irel->r_vaddr - sec->vma;
          ++label_end;
        }
    }

  /* Note that the assembler currently always outputs relocs in
     address order.  If that ever changes, this code will need to sort
     the label values and the relocs.  */

  label = labels;

  for (irel = internal_relocs; irel < irelend; irel++)
    {
      bfd_vma start, stop;

      if (irel->r_type != R_SH_CODE)
        continue;

      start = irel->r_vaddr - sec->vma;

      for (irel++; irel < irelend; irel++)
        if (irel->r_type == R_SH_DATA)
          break;
      if (irel < irelend)
        stop = irel->r_vaddr - sec->vma;
      else
        stop = sec->size;

      if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
                                     (PTR) internal_relocs, &label,
                                     label_end, start, stop, pswapped))
        goto error_return;
    }

  free (labels);

  return TRUE;

 error_return:
  if (labels != NULL)
    free (labels);
  return FALSE;
}
/* Swap two SH instructions.  */

static bfd_boolean
sh_swap_insns (abfd, sec, relocs, contents, addr)
     bfd *abfd;
     asection *sec;
     PTR relocs;
     bfd_byte *contents;
     bfd_vma addr;
{
  struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
  unsigned short i1, i2;
  struct internal_reloc *irel, *irelend;

  /* Swap the instructions themselves.  */
  i1 = bfd_get_16 (abfd, contents + addr);
  i2 = bfd_get_16 (abfd, contents + addr + 2);
  bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
  bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);

  /* Adjust all reloc addresses.  */
  irelend = internal_relocs + sec->reloc_count;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      int type, add;

      /* There are a few special types of relocs that we don't want to
         adjust.  These relocs do not apply to the instruction itself,
         but are only associated with the address.  */
      type = irel->r_type;
      if (type == R_SH_ALIGN
          || type == R_SH_CODE
          || type == R_SH_DATA
          || type == R_SH_LABEL)
        continue;

      /* If an R_SH_USES reloc points to one of the addresses being
         swapped, we must adjust it.  It would be incorrect to do this
         for a jump, though, since we want to execute both
         instructions after the jump.  (We have avoided swapping
         around a label, so the jump will not wind up executing an
         instruction it shouldn't).  */
      if (type == R_SH_USES)
        {
          bfd_vma off;

          off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
          if (off == addr)
            irel->r_offset += 2;
          else if (off == addr + 2)
            irel->r_offset -= 2;
        }

      if (irel->r_vaddr - sec->vma == addr)
        {
          irel->r_vaddr += 2;
          add = -2;
        }
      else if (irel->r_vaddr - sec->vma == addr + 2)
        {
          irel->r_vaddr -= 2;
          add = 2;
        }
      else
        add = 0;

      if (add != 0)
        {
          bfd_byte *loc;
          unsigned short insn, oinsn;
          bfd_boolean overflow;

          loc = contents + irel->r_vaddr - sec->vma;
          overflow = FALSE;
          switch (type)
            {
            default:
              break;

            case R_SH_PCDISP8BY2:
            case R_SH_PCRELIMM8BY2:
              insn = bfd_get_16 (abfd, loc);
              oinsn = insn;
              insn += add / 2;
              if ((oinsn & 0xff00) != (insn & 0xff00))
                overflow = TRUE;
              bfd_put_16 (abfd, (bfd_vma) insn, loc);
              break;

            case R_SH_PCDISP:
              insn = bfd_get_16 (abfd, loc);
              oinsn = insn;
              insn += add / 2;
              if ((oinsn & 0xf000) != (insn & 0xf000))
                overflow = TRUE;
              bfd_put_16 (abfd, (bfd_vma) insn, loc);
              break;

            case R_SH_PCRELIMM8BY4:
              /* This reloc ignores the least significant 3 bits of
                 the program counter before adding in the offset.
                 This means that if ADDR is on a four byte boundary,
                 the swap will not affect the offset.  If ADDR is not
                 on a four byte boundary, then the instruction will be
                 crossing a four byte boundary, and must be adjusted.  */
              if ((addr & 3) != 0)
                {
                  insn = bfd_get_16 (abfd, loc);
                  oinsn = insn;
                  insn += add / 2;
                  if ((oinsn & 0xff00) != (insn & 0xff00))
                    overflow = TRUE;
                  bfd_put_16 (abfd, (bfd_vma) insn, loc);
                }

              break;
            }

          if (overflow)
            {
              ((*_bfd_error_handler)
               ("%B: 0x%lx: fatal: reloc overflow while relaxing",
                abfd, (unsigned long) irel->r_vaddr));
              bfd_set_error (bfd_error_bad_value);
              return FALSE;
            }
        }
    }

  return TRUE;
}
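/* A small worked example of the displacement fixup above, with
   illustrative numbers: if ADDR is 0x100 and a conditional branch
   carrying an R_SH_PCDISP8BY2 reloc sits at 0x102, the swap moves the
   branch to 0x100, two bytes closer to its target, so ADD is 2 and
   the 8-bit displacement field (counted in two byte units from the
   instruction) is bumped by one.  If that bump carries into the
   opcode bits masked by 0xff00, the displacement no longer fits and
   the swap is reported as a fatal reloc overflow.  */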
/* This is a modification of _bfd_coff_generic_relocate_section, which
   will handle SH relaxing.  */

static bfd_boolean
sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
                     relocs, syms, sections)
     bfd *output_bfd ATTRIBUTE_UNUSED;
     struct bfd_link_info *info;
     bfd *input_bfd;
     asection *input_section;
     bfd_byte *contents;
     struct internal_reloc *relocs;
     struct internal_syment *syms;
     asection **sections;
{
  struct internal_reloc *rel;
  struct internal_reloc *relend;

  rel = relocs;
  relend = rel + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      long symndx;
      struct coff_link_hash_entry *h;
      struct internal_syment *sym;
      bfd_vma addend;
      bfd_vma val;
      reloc_howto_type *howto;
      bfd_reloc_status_type rstat;

      /* Almost all relocs have to do with relaxing.  If any work must
         be done for them, it has been done in sh_relax_section.  */
      if (rel->r_type != R_SH_IMM32
#ifdef COFF_WITH_PE
          && rel->r_type != R_SH_IMM32CE
          && rel->r_type != R_SH_IMAGEBASE
#endif
          && rel->r_type != R_SH_PCDISP)
        continue;

      symndx = rel->r_symndx;

      if (symndx == -1)
        {
          h = NULL;
          sym = NULL;
        }
      else
        {
          if (symndx < 0
              || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
            {
              (*_bfd_error_handler)
                ("%B: illegal symbol index %ld in relocs",
                 input_bfd, symndx);
              bfd_set_error (bfd_error_bad_value);
              return FALSE;
            }
          h = obj_coff_sym_hashes (input_bfd)[symndx];
          sym = syms + symndx;
        }

      if (sym != NULL && sym->n_scnum != 0)
        addend = - sym->n_value;
      else
        addend = 0;

      if (rel->r_type == R_SH_PCDISP)
        addend -= 4;

      if (rel->r_type >= SH_COFF_HOWTO_COUNT)
        howto = NULL;
      else
        howto = &sh_coff_howtos[rel->r_type];

      if (howto == NULL)
        {
          bfd_set_error (bfd_error_bad_value);
          return FALSE;
        }

#ifdef COFF_WITH_PE
      if (rel->r_type == R_SH_IMAGEBASE)
        addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
#endif

      val = 0;

      if (h == NULL)
        {
          asection *sec;

          /* There is nothing to do for an internal PCDISP reloc.  */
          if (rel->r_type == R_SH_PCDISP)
            continue;

          if (symndx == -1)
            {
              sec = bfd_abs_section_ptr;
              val = 0;
            }
          else
            {
              sec = sections[symndx];
              val = (sec->output_section->vma
                     + sec->output_offset
                     + sym->n_value
                     - sec->vma);
            }
        }
      else
        {
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            {
              asection *sec;

              sec = h->root.u.def.section;
              val = (h->root.u.def.value
                     + sec->output_section->vma
                     + sec->output_offset);
            }
          else if (! info->relocatable)
            {
              if (! ((*info->callbacks->undefined_symbol)
                     (info, h->root.root.string, input_bfd, input_section,
                      rel->r_vaddr - input_section->vma, TRUE)))
                return FALSE;
            }
        }

      rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
                                        contents,
                                        rel->r_vaddr - input_section->vma,
                                        val, addend);

      switch (rstat)
        {
        default:
          abort ();
        case bfd_reloc_ok:
          break;
        case bfd_reloc_overflow:
          {
            const char *name;
            char buf[SYMNMLEN + 1];

            if (symndx == -1)
              name = "*ABS*";
            else if (h != NULL)
              name = NULL;
            else if (sym->_n._n_n._n_zeroes == 0
                     && sym->_n._n_n._n_offset != 0)
              name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
            else
              {
                strncpy (buf, sym->_n._n_name, SYMNMLEN);
                buf[SYMNMLEN] = '\0';
                name = buf;
              }

            if (! ((*info->callbacks->reloc_overflow)
                   (info, (h ? &h->root : NULL), name, howto->name,
                    (bfd_vma) 0, input_bfd, input_section,
                    rel->r_vaddr - input_section->vma)))
              return FALSE;
          }
        }
    }

  return TRUE;
}
/* This is a version of bfd_generic_get_relocated_section_contents
   which uses sh_relocate_section.  */

static bfd_byte *
sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
                                        data, relocatable, symbols)
     bfd *output_bfd;
     struct bfd_link_info *link_info;
     struct bfd_link_order *link_order;
     bfd_byte *data;
     bfd_boolean relocatable;
     asymbol **symbols;
{
  asection *input_section = link_order->u.indirect.section;
  bfd *input_bfd = input_section->owner;
  asection **sections = NULL;
  struct internal_reloc *internal_relocs = NULL;
  struct internal_syment *internal_syms = NULL;

  /* We only need to handle the case of relaxing, or of having a
     particular set of section contents, specially.  */
  if (relocatable
      || coff_section_data (input_bfd, input_section) == NULL
      || coff_section_data (input_bfd, input_section)->contents == NULL)
    return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
                                                       link_order, data,
                                                       relocatable, symbols);

  memcpy (data, coff_section_data (input_bfd, input_section)->contents,
          (size_t) input_section->size);

  if ((input_section->flags & SEC_RELOC) != 0
      && input_section->reloc_count > 0)
    {
      bfd_size_type symesz = bfd_coff_symesz (input_bfd);
      bfd_byte *esym, *esymend;
      struct internal_syment *isymp;
      asection **secpp;
      bfd_size_type amt;

      if (! _bfd_coff_get_external_symbols (input_bfd))
        goto error_return;

      internal_relocs = (_bfd_coff_read_internal_relocs
                         (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
                          FALSE, (struct internal_reloc *) NULL));
      if (internal_relocs == NULL)
        goto error_return;

      amt = obj_raw_syment_count (input_bfd);
      amt *= sizeof (struct internal_syment);
      internal_syms = (struct internal_syment *) bfd_malloc (amt);
      if (internal_syms == NULL)
        goto error_return;

      amt = obj_raw_syment_count (input_bfd);
      amt *= sizeof (asection *);
      sections = (asection **) bfd_malloc (amt);
      if (sections == NULL)
        goto error_return;

      isymp = internal_syms;
      secpp = sections;
      esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
      esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
      while (esym < esymend)
        {
          bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);

          if (isymp->n_scnum != 0)
            *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
          else
            {
              if (isymp->n_value == 0)
                *secpp = bfd_und_section_ptr;
              else
                *secpp = bfd_com_section_ptr;
            }

          esym += (isymp->n_numaux + 1) * symesz;
          secpp += isymp->n_numaux + 1;
          isymp += isymp->n_numaux + 1;
        }

      if (! sh_relocate_section (output_bfd, link_info, input_bfd,
                                 input_section, data, internal_relocs,
                                 internal_syms, sections))
        goto error_return;

      free (sections);
      sections = NULL;
      free (internal_syms);
      internal_syms = NULL;
      free (internal_relocs);
      internal_relocs = NULL;
    }

  return data;

 error_return:
  if (internal_relocs != NULL)
    free (internal_relocs);
  if (internal_syms != NULL)
    free (internal_syms);
  if (sections != NULL)
    free (sections);
  return NULL;
}
/* The target vectors.  */

#ifndef TARGET_SHL_SYM
CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
#endif

#ifdef TARGET_SHL_SYM
#define TARGET_SYM TARGET_SHL_SYM
#else
#define TARGET_SYM shlcoff_vec
#endif

#ifndef TARGET_SHL_NAME
#define TARGET_SHL_NAME "coff-shl"
#endif

#ifdef COFF_WITH_PE
CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
                               SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
#else
CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
                               0, '_', NULL, COFF_SWAP_TABLE)
#endif
#ifndef TARGET_SHL_SYM
static const bfd_target * coff_small_object_p PARAMS ((bfd *));
static bfd_boolean coff_small_new_section_hook PARAMS ((bfd *, asection *));

/* Some people want versions of the SH COFF target which do not align
   to 16 byte boundaries.  We implement that by adding a couple of new
   target vectors.  These are just like the ones above, but they
   change the default section alignment.  To generate them in the
   assembler, use -small.  To use them in the linker, use -b
   coff-sh{l}-small and -oformat coff-sh{l}-small.

   Yes, this is a horrible hack.  A general solution for setting
   section alignment in COFF is rather complex.  ELF handles this
   much better.  */
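/* Illustrative command lines (the actual program names depend on how
   the toolchain was configured):

     as -small -o foo.o foo.s
     ld -b coff-sh-small -oformat coff-sh-small -o foo foo.o

   and correspondingly coff-shl-small for the little endian target.  */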
/* Only recognize the small versions if the target was not defaulted.
   Otherwise we won't recognize the non default endianness.  */

static const bfd_target *
coff_small_object_p (abfd)
     bfd *abfd;
{
  if (abfd->target_defaulted)
    {
      bfd_set_error (bfd_error_wrong_format);
      return NULL;
    }
  return coff_object_p (abfd);
}

/* Set the section alignment for the small versions.  */

static bfd_boolean
coff_small_new_section_hook (abfd, section)
     bfd *abfd;
     asection *section;
{
  if (! coff_new_section_hook (abfd, section))
    return FALSE;

  /* We must align to at least a four byte boundary, because longword
     accesses must be on a four byte boundary.  */
  if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
    section->alignment_power = 2;

  return TRUE;
}
/* This is copied from bfd_coff_std_swap_table so that we can change
   the default section alignment power.  */

static bfd_coff_backend_data bfd_coff_small_swap_table =
{
  coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
  coff_swap_aux_out, coff_swap_sym_out,
  coff_swap_lineno_out, coff_swap_reloc_out,
  coff_swap_filehdr_out, coff_swap_aouthdr_out,
  coff_swap_scnhdr_out,
  FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
#ifdef COFF_LONG_FILENAMES
  TRUE,
#else
  FALSE,
#endif
  COFF_DEFAULT_LONG_SECTION_NAMES,
  2,
#ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
  TRUE,
#else
  FALSE,
#endif
#ifdef COFF_DEBUG_STRING_WIDE_PREFIX
  4,
#else
  2,
#endif
  coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
  coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
  coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
  coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
  coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
  coff_classify_symbol, coff_compute_section_file_positions,
  coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
  coff_adjust_symndx, coff_link_add_one_symbol,
  coff_link_output_has_begun, coff_final_link_postscript,
  bfd_pe_print_pdata
};

#define coff_small_close_and_cleanup \
  coff_close_and_cleanup
#define coff_small_bfd_free_cached_info \
  coff_bfd_free_cached_info
#define coff_small_get_section_contents \
  coff_get_section_contents
#define coff_small_get_section_contents_in_window \
  coff_get_section_contents_in_window
extern const bfd_target shlcoff_small_vec;

const bfd_target shcoff_small_vec =
{
  "coff-sh-small",		/* name */
  bfd_target_coff_flavour,
  BFD_ENDIAN_BIG,		/* data byte order is big */
  BFD_ENDIAN_BIG,		/* header byte order is big */

  (HAS_RELOC | EXEC_P |		/* object flags */
   HAS_LINENO | HAS_DEBUG |
   HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),

  (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
  '_',				/* leading symbol underscore */
  '/',				/* ar_pad_char */
  15,				/* ar_max_namelen */
  0,				/* match priority.  */
  bfd_getb64, bfd_getb_signed_64, bfd_putb64,
  bfd_getb32, bfd_getb_signed_32, bfd_putb32,
  bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
  bfd_getb64, bfd_getb_signed_64, bfd_putb64,
  bfd_getb32, bfd_getb_signed_32, bfd_putb32,
  bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */

  {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
   bfd_generic_archive_p, _bfd_dummy_target},
  {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
   bfd_false},
  {bfd_false, coff_write_object_contents, /* bfd_write_contents */
   _bfd_write_archive_contents, bfd_false},

  BFD_JUMP_TABLE_GENERIC (coff_small),
  BFD_JUMP_TABLE_COPY (coff),
  BFD_JUMP_TABLE_CORE (_bfd_nocore),
  BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
  BFD_JUMP_TABLE_SYMBOLS (coff),
  BFD_JUMP_TABLE_RELOCS (coff),
  BFD_JUMP_TABLE_WRITE (coff),
  BFD_JUMP_TABLE_LINK (coff),
  BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),

  & shlcoff_small_vec,

  (PTR) &bfd_coff_small_swap_table
};
const bfd_target shlcoff_small_vec =
{
  "coff-shl-small",		/* name */
  bfd_target_coff_flavour,
  BFD_ENDIAN_LITTLE,		/* data byte order is little */
  BFD_ENDIAN_LITTLE,		/* header byte order is little */

  (HAS_RELOC | EXEC_P |		/* object flags */
   HAS_LINENO | HAS_DEBUG |
   HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),

  (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
  '_',				/* leading symbol underscore */
  '/',				/* ar_pad_char */
  15,				/* ar_max_namelen */
  0,				/* match priority.  */
  bfd_getl64, bfd_getl_signed_64, bfd_putl64,
  bfd_getl32, bfd_getl_signed_32, bfd_putl32,
  bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
  bfd_getl64, bfd_getl_signed_64, bfd_putl64,
  bfd_getl32, bfd_getl_signed_32, bfd_putl32,
  bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */

  {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
   bfd_generic_archive_p, _bfd_dummy_target},
  {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
   bfd_false},
  {bfd_false, coff_write_object_contents, /* bfd_write_contents */
   _bfd_write_archive_contents, bfd_false},

  BFD_JUMP_TABLE_GENERIC (coff_small),
  BFD_JUMP_TABLE_COPY (coff),
  BFD_JUMP_TABLE_CORE (_bfd_nocore),
  BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
  BFD_JUMP_TABLE_SYMBOLS (coff),
  BFD_JUMP_TABLE_RELOCS (coff),
  BFD_JUMP_TABLE_WRITE (coff),
  BFD_JUMP_TABLE_LINK (coff),
  BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),

  & shcoff_small_vec,

  (PTR) &bfd_coff_small_swap_table