1 /* BFD back-end for Renesas Super-H COFF binaries.
2 Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2007, 2008 Free Software Foundation, Inc.
4 Contributed by Cygnus Support.
5 Written by Steve Chamberlain, <sac@cygnus.com>.
6 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
8 This file is part of BFD, the Binary File Descriptor library.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
23 MA 02110-1301, USA. */
27 #include "libiberty.h"
31 #include "coff/internal.h"
36 #ifndef COFF_IMAGE_WITH_PE
37 static bfd_boolean sh_align_load_span
38 PARAMS ((bfd *, asection *, bfd_byte *,
39 bfd_boolean (*) (bfd *, asection *, PTR, bfd_byte *, bfd_vma),
40 PTR, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *));
42 #define _bfd_sh_align_load_span sh_align_load_span
46 #undef bfd_pe_print_pdata
47 #define bfd_pe_print_pdata pe_print_ce_compressed_pdata
48 extern bfd_boolean pe_print_ce_compressed_pdata (bfd *, void *);
52 /* Internal functions. */
53 static bfd_reloc_status_type sh_reloc
54 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
55 static long get_symbol_value PARAMS ((asymbol *));
56 static bfd_boolean sh_relax_section
57 PARAMS ((bfd *, asection *, struct bfd_link_info *, bfd_boolean *));
58 static bfd_boolean sh_relax_delete_bytes
59 PARAMS ((bfd *, asection *, bfd_vma, int));
60 #ifndef COFF_IMAGE_WITH_PE
61 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
63 static bfd_boolean sh_align_loads
64 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *,
66 static bfd_boolean sh_swap_insns
67 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
68 static bfd_boolean sh_relocate_section
69 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
70 struct internal_reloc *, struct internal_syment *, asection **));
71 static bfd_byte *sh_coff_get_relocated_section_contents
72 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
73 bfd_byte *, bfd_boolean, asymbol **));
74 static reloc_howto_type * sh_coff_reloc_type_lookup PARAMS ((bfd *, bfd_reloc_code_real_type));
77 /* Can't build import tables with 2**4 alignment. */
78 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 2
80 /* Default section alignment to 2**4. */
81 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 4
84 #ifdef COFF_IMAGE_WITH_PE
85 /* Align PE executables. */
86 #define COFF_PAGE_SIZE 0x1000
89 /* Generate long file names. */
90 #define COFF_LONG_FILENAMES
93 static bfd_boolean in_reloc_p PARAMS ((bfd *, reloc_howto_type *));
94 /* Return TRUE if this relocation should
95 appear in the output .reloc section. */
96 static bfd_boolean in_reloc_p (abfd, howto)
97 bfd * abfd ATTRIBUTE_UNUSED;
98 reloc_howto_type * howto;
100 return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
104 /* The supported relocations. There are a lot of relocations defined
105 in coff/internal.h which we do not expect to ever see. */
106 static reloc_howto_type sh_coff_howtos[] =
/* NOTE(review): this table appears to have lost lines in extraction —
   several HOWTO entries are missing fields (rightshift, bitsize,
   bitpos, name) and the surrounding braces/EMPTY_HOWTO rows for some
   reloc numbers are absent.  Compare against upstream binutils
   coff-sh.c before relying on entry offsets.  */
112 HOWTO (R_SH_IMM32CE, /* type */
114 2, /* size (0 = byte, 1 = short, 2 = long) */
116 FALSE, /* pc_relative */
118 complain_overflow_bitfield, /* complain_on_overflow */
119 sh_reloc, /* special_function */
120 "r_imm32ce", /* name */
121 TRUE, /* partial_inplace */
122 0xffffffff, /* src_mask */
123 0xffffffff, /* dst_mask */
124 FALSE), /* pcrel_offset */
128 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
129 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
130 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
131 EMPTY_HOWTO (6), /* R_SH_IMM24 */
132 EMPTY_HOWTO (7), /* R_SH_LOW16 */
134 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
136 HOWTO (R_SH_PCDISP8BY2, /* type */
138 1, /* size (0 = byte, 1 = short, 2 = long) */
140 TRUE, /* pc_relative */
142 complain_overflow_signed, /* complain_on_overflow */
143 sh_reloc, /* special_function */
144 "r_pcdisp8by2", /* name */
145 TRUE, /* partial_inplace */
148 TRUE), /* pcrel_offset */
150 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
152 HOWTO (R_SH_PCDISP, /* type */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
156 TRUE, /* pc_relative */
158 complain_overflow_signed, /* complain_on_overflow */
159 sh_reloc, /* special_function */
160 "r_pcdisp12by2", /* name */
161 TRUE, /* partial_inplace */
162 0xfff, /* src_mask */
163 0xfff, /* dst_mask */
164 TRUE), /* pcrel_offset */
168 HOWTO (R_SH_IMM32, /* type */
170 2, /* size (0 = byte, 1 = short, 2 = long) */
172 FALSE, /* pc_relative */
174 complain_overflow_bitfield, /* complain_on_overflow */
175 sh_reloc, /* special_function */
176 "r_imm32", /* name */
177 TRUE, /* partial_inplace */
178 0xffffffff, /* src_mask */
179 0xffffffff, /* dst_mask */
180 FALSE), /* pcrel_offset */
184 HOWTO (R_SH_IMAGEBASE, /* type */
186 2, /* size (0 = byte, 1 = short, 2 = long) */
188 FALSE, /* pc_relative */
190 complain_overflow_bitfield, /* complain_on_overflow */
191 sh_reloc, /* special_function */
/* NOTE(review): the name field line for R_SH_IMAGEBASE is missing here.  */
193 TRUE, /* partial_inplace */
194 0xffffffff, /* src_mask */
195 0xffffffff, /* dst_mask */
196 FALSE), /* pcrel_offset */
198 EMPTY_HOWTO (16), /* R_SH_IMM8 */
200 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
201 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
202 EMPTY_HOWTO (19), /* R_SH_IMM4 */
203 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
204 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
206 HOWTO (R_SH_PCRELIMM8BY2, /* type */
208 1, /* size (0 = byte, 1 = short, 2 = long) */
210 TRUE, /* pc_relative */
212 complain_overflow_unsigned, /* complain_on_overflow */
213 sh_reloc, /* special_function */
214 "r_pcrelimm8by2", /* name */
215 TRUE, /* partial_inplace */
218 TRUE), /* pcrel_offset */
220 HOWTO (R_SH_PCRELIMM8BY4, /* type */
222 1, /* size (0 = byte, 1 = short, 2 = long) */
224 TRUE, /* pc_relative */
226 complain_overflow_unsigned, /* complain_on_overflow */
227 sh_reloc, /* special_function */
228 "r_pcrelimm8by4", /* name */
229 TRUE, /* partial_inplace */
232 TRUE), /* pcrel_offset */
234 HOWTO (R_SH_IMM16, /* type */
236 1, /* size (0 = byte, 1 = short, 2 = long) */
238 FALSE, /* pc_relative */
240 complain_overflow_bitfield, /* complain_on_overflow */
241 sh_reloc, /* special_function */
242 "r_imm16", /* name */
243 TRUE, /* partial_inplace */
244 0xffff, /* src_mask */
245 0xffff, /* dst_mask */
246 FALSE), /* pcrel_offset */
248 HOWTO (R_SH_SWITCH16, /* type */
250 1, /* size (0 = byte, 1 = short, 2 = long) */
252 FALSE, /* pc_relative */
254 complain_overflow_bitfield, /* complain_on_overflow */
255 sh_reloc, /* special_function */
256 "r_switch16", /* name */
257 TRUE, /* partial_inplace */
258 0xffff, /* src_mask */
259 0xffff, /* dst_mask */
260 FALSE), /* pcrel_offset */
262 HOWTO (R_SH_SWITCH32, /* type */
264 2, /* size (0 = byte, 1 = short, 2 = long) */
266 FALSE, /* pc_relative */
268 complain_overflow_bitfield, /* complain_on_overflow */
269 sh_reloc, /* special_function */
270 "r_switch32", /* name */
271 TRUE, /* partial_inplace */
272 0xffffffff, /* src_mask */
273 0xffffffff, /* dst_mask */
274 FALSE), /* pcrel_offset */
276 HOWTO (R_SH_USES, /* type */
278 1, /* size (0 = byte, 1 = short, 2 = long) */
280 FALSE, /* pc_relative */
282 complain_overflow_bitfield, /* complain_on_overflow */
283 sh_reloc, /* special_function */
285 TRUE, /* partial_inplace */
286 0xffff, /* src_mask */
287 0xffff, /* dst_mask */
288 FALSE), /* pcrel_offset */
290 HOWTO (R_SH_COUNT, /* type */
292 2, /* size (0 = byte, 1 = short, 2 = long) */
294 FALSE, /* pc_relative */
296 complain_overflow_bitfield, /* complain_on_overflow */
297 sh_reloc, /* special_function */
298 "r_count", /* name */
299 TRUE, /* partial_inplace */
300 0xffffffff, /* src_mask */
301 0xffffffff, /* dst_mask */
302 FALSE), /* pcrel_offset */
304 HOWTO (R_SH_ALIGN, /* type */
306 2, /* size (0 = byte, 1 = short, 2 = long) */
308 FALSE, /* pc_relative */
310 complain_overflow_bitfield, /* complain_on_overflow */
311 sh_reloc, /* special_function */
312 "r_align", /* name */
313 TRUE, /* partial_inplace */
314 0xffffffff, /* src_mask */
315 0xffffffff, /* dst_mask */
316 FALSE), /* pcrel_offset */
318 HOWTO (R_SH_CODE, /* type */
320 2, /* size (0 = byte, 1 = short, 2 = long) */
322 FALSE, /* pc_relative */
324 complain_overflow_bitfield, /* complain_on_overflow */
325 sh_reloc, /* special_function */
327 TRUE, /* partial_inplace */
328 0xffffffff, /* src_mask */
329 0xffffffff, /* dst_mask */
330 FALSE), /* pcrel_offset */
332 HOWTO (R_SH_DATA, /* type */
334 2, /* size (0 = byte, 1 = short, 2 = long) */
336 FALSE, /* pc_relative */
338 complain_overflow_bitfield, /* complain_on_overflow */
339 sh_reloc, /* special_function */
341 TRUE, /* partial_inplace */
342 0xffffffff, /* src_mask */
343 0xffffffff, /* dst_mask */
344 FALSE), /* pcrel_offset */
346 HOWTO (R_SH_LABEL, /* type */
348 2, /* size (0 = byte, 1 = short, 2 = long) */
350 FALSE, /* pc_relative */
352 complain_overflow_bitfield, /* complain_on_overflow */
353 sh_reloc, /* special_function */
354 "r_label", /* name */
355 TRUE, /* partial_inplace */
356 0xffffffff, /* src_mask */
357 0xffffffff, /* dst_mask */
358 FALSE), /* pcrel_offset */
360 HOWTO (R_SH_SWITCH8, /* type */
362 0, /* size (0 = byte, 1 = short, 2 = long) */
364 FALSE, /* pc_relative */
366 complain_overflow_bitfield, /* complain_on_overflow */
367 sh_reloc, /* special_function */
368 "r_switch8", /* name */
369 TRUE, /* partial_inplace */
372 FALSE) /* pcrel_offset */
375 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
377 /* Check for a bad magic number. */
378 #define BADMAG(x) SHBADMAG(x)
380 /* Customize coffcode.h (this is not currently used). */
383 /* FIXME: This should not be set here. */
384 #define __A_MAGIC_SET__
387 /* Swap the r_offset field in and out. */
388 #define SWAP_IN_RELOC_OFFSET H_GET_32
389 #define SWAP_OUT_RELOC_OFFSET H_PUT_32
391 /* Swap out extra information in the reloc structure. */
392 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
395 dst->r_stuff[0] = 'S'; \
396 dst->r_stuff[1] = 'C'; \
401 /* Get the value of a symbol, when performing a relocation. */
404 get_symbol_value (symbol)
409 if (bfd_is_com_section (symbol->section))
412 relocation = (symbol->value +
413 symbol->section->output_section->vma +
414 symbol->section->output_offset);
420 /* Convert an rtype to howto for the COFF backend linker.
421 Copied from coff-i386. */
/* NOTE(review): this function appears to have lost lines in extraction
   (the `asection *sec' and `bfd_vma *addendp' parameter declarations,
   the opening brace, the initial `*addendp = 0;', and some
   COFF_WITH_PE conditional guards are missing).  Compare against
   upstream coff-sh.c before editing.  */
422 #define coff_rtype_to_howto coff_sh_rtype_to_howto
423 static reloc_howto_type * coff_sh_rtype_to_howto PARAMS ((bfd *, asection *, struct internal_reloc *, struct coff_link_hash_entry *, struct internal_syment *, bfd_vma *));
425 static reloc_howto_type *
426 coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
427 bfd * abfd ATTRIBUTE_UNUSED;
429 struct internal_reloc * rel;
430 struct coff_link_hash_entry * h;
431 struct internal_syment * sym;
434 reloc_howto_type * howto;
/* Index the howto table directly by the COFF r_type.  */
436 howto = sh_coff_howtos + rel->r_type;
440 if (howto->pc_relative)
441 *addendp += sec->vma;
443 if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
445 /* This is a common symbol. The section contents include the
446 size (sym->n_value) as an addend. The relocate_section
447 function will be adding in the final value of the symbol. We
448 need to subtract out the current size in order to get the
450 BFD_ASSERT (h != NULL);
453 if (howto->pc_relative)
457 /* If the symbol is defined, then the generic code is going to
458 add back the symbol value in order to cancel out an
459 adjustment it made to the addend. However, we set the addend
460 to 0 at the start of this function. We need to adjust here,
461 to avoid the adjustment the generic code will make. FIXME:
462 This is getting a bit hackish. */
463 if (sym != NULL && sym->n_scnum != 0)
464 *addendp -= sym->n_value;
/* R_SH_IMAGEBASE relocs are relative to the PE image base, so remove
   it from the addend here.  */
467 if (rel->r_type == R_SH_IMAGEBASE)
468 *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
473 #endif /* COFF_WITH_PE */
475 /* This structure is used to map BFD reloc codes to SH PE relocs. */
476 struct shcoff_reloc_map
478 bfd_reloc_code_real_type bfd_reloc_val;
479 unsigned char shcoff_reloc_val;
/* NOTE(review): two alternative definitions of sh_reloc_map follow.
   Upstream these are selected by #ifdef COFF_WITH_PE / #else / #endif
   lines which appear to have been lost in extraction — as written the
   array is defined twice.  The first (PE) variant uses the CE relocs
   and maps BFD_RELOC_RVA to R_SH_IMAGEBASE; the second uses plain
   R_SH_IMM32.  */
483 /* An array mapping BFD reloc codes to SH PE relocs. */
484 static const struct shcoff_reloc_map sh_reloc_map[] =
486 { BFD_RELOC_32, R_SH_IMM32CE },
487 { BFD_RELOC_RVA, R_SH_IMAGEBASE },
488 { BFD_RELOC_CTOR, R_SH_IMM32CE },
491 /* An array mapping BFD reloc codes to SH PE relocs. */
492 static const struct shcoff_reloc_map sh_reloc_map[] =
494 { BFD_RELOC_32, R_SH_IMM32 },
495 { BFD_RELOC_CTOR, R_SH_IMM32 },
499 /* Given a BFD reloc code, return the howto structure for the
500 corresponding SH PE reloc. */
501 #define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
502 #define coff_bfd_reloc_name_lookup sh_coff_reloc_name_lookup
504 static reloc_howto_type *
505 sh_coff_reloc_type_lookup (abfd, code)
506 bfd * abfd ATTRIBUTE_UNUSED;
507 bfd_reloc_code_real_type code;
511 for (i = ARRAY_SIZE (sh_reloc_map); i--;)
512 if (sh_reloc_map[i].bfd_reloc_val == code)
513 return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
515 fprintf (stderr, "SH Error: unknown reloc type %d\n", code);
519 static reloc_howto_type *
520 sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
525 for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
526 if (sh_coff_howtos[i].name != NULL
527 && strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
528 return &sh_coff_howtos[i];
533 /* This macro is used in coffcode.h to get the howto corresponding to
534 an internal reloc. */
/* Out-of-range r_type values map to NULL rather than indexing past the
   end of sh_coff_howtos.  NOTE(review): an opening parenthesis /
   relent assignment line appears to be missing from this macro in
   this copy.  */
536 #define RTYPE2HOWTO(relent, internal) \
538 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
539 ? &sh_coff_howtos[(internal)->r_type] \
540 : (reloc_howto_type *) NULL))
542 /* This is the same as the macro in coffcode.h, except that it copies
543 r_offset into reloc_entry->addend for some relocs. */
/* For the relaxation bookkeeping relocs (SWITCH8/16/32, USES, COUNT,
   ALIGN) the interesting datum is the r_offset field, so it is
   propagated into the addend.  NOTE(review): some continuation lines
   of this macro (else branches / braces) appear to be missing in this
   copy.  */
544 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
546 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
547 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
548 coffsym = (obj_symbols (abfd) \
549 + (cache_ptr->sym_ptr_ptr - symbols)); \
551 coffsym = coff_symbol_from (abfd, ptr); \
552 if (coffsym != (coff_symbol_type *) NULL \
553 && coffsym->native->u.syment.n_scnum == 0) \
554 cache_ptr->addend = 0; \
555 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
556 && ptr->section != (asection *) NULL) \
557 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
559 cache_ptr->addend = 0; \
560 if ((reloc).r_type == R_SH_SWITCH8 \
561 || (reloc).r_type == R_SH_SWITCH16 \
562 || (reloc).r_type == R_SH_SWITCH32 \
563 || (reloc).r_type == R_SH_USES \
564 || (reloc).r_type == R_SH_COUNT \
565 || (reloc).r_type == R_SH_ALIGN) \
566 cache_ptr->addend = (reloc).r_offset; \
569 /* This is the howto function for the SH relocations. */
/* NOTE(review): this function has lost lines in extraction — the
   remaining parameter declarations, local declarations (sym_value,
   insn), the switch statement and its case labels, and the closing
   return are missing.  The fragments below are recognizable from
   upstream: a 32-bit immediate case, a PE image-base case, and the
   12-bit PC-relative branch displacement case.  */
571 static bfd_reloc_status_type
572 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
575 arelent *reloc_entry;
578 asection *input_section;
580 char **error_message ATTRIBUTE_UNUSED;
584 unsigned short r_type;
585 bfd_vma addr = reloc_entry->address;
586 bfd_byte *hit_data = addr + (bfd_byte *) data;
588 r_type = reloc_entry->howto->type;
590 if (output_bfd != NULL)
592 /* Partial linking--do nothing. */
593 reloc_entry->address += input_section->output_offset;
597 /* Almost all relocs have to do with relaxing. If any work must be
598 done for them, it has been done in sh_relax_section. */
599 if (r_type != R_SH_IMM32
601 && r_type != R_SH_IMM32CE
602 && r_type != R_SH_IMAGEBASE
604 && (r_type != R_SH_PCDISP
605 || (symbol_in->flags & BSF_LOCAL) != 0))
608 if (symbol_in != NULL
609 && bfd_is_und_section (symbol_in->section))
610 return bfd_reloc_undefined;
612 sym_value = get_symbol_value (symbol_in);
/* Presumably the R_SH_IMM32 / R_SH_IMM32CE case: add the symbol value
   and addend into the 32-bit field in place.  */
620 insn = bfd_get_32 (abfd, hit_data);
621 insn += sym_value + reloc_entry->addend;
622 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
/* Presumably the R_SH_IMAGEBASE case: same as above but relative to
   the PE image base.  */
626 insn = bfd_get_32 (abfd, hit_data);
627 insn += sym_value + reloc_entry->addend;
628 insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
629 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
/* Presumably the R_SH_PCDISP case: patch the signed 12-bit branch
   displacement (measured in 2-byte units from four bytes past the
   branch).  */
633 insn = bfd_get_16 (abfd, hit_data);
634 sym_value += reloc_entry->addend;
635 sym_value -= (input_section->output_section->vma
636 + input_section->output_offset
639 sym_value += (insn & 0xfff) << 1;
642 insn = (insn & 0xf000) | (sym_value & 0xfff);
643 bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
644 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
645 return bfd_reloc_overflow;
655 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
657 /* We can do relaxing. */
658 #define coff_bfd_relax_section sh_relax_section
660 /* We use the special COFF backend linker. */
661 #define coff_relocate_section sh_relocate_section
663 /* When relaxing, we need to use special code to get the relocated
665 #define coff_bfd_get_relocated_section_contents \
666 sh_coff_get_relocated_section_contents
668 #include "coffcode.h"
670 /* This function handles relaxing on the SH.
672 Function calls on the SH look like this:
681 The compiler and assembler will cooperate to create R_SH_USES
682 relocs on the jsr instructions. The r_offset field of the
683 R_SH_USES reloc is the PC relative offset to the instruction which
684 loads the register (the r_offset field is computed as though it
685 were a jump instruction, so the offset value is actually from four
686 bytes past the instruction). The linker can use this reloc to
687 determine just which function is being called, and thus decide
688 whether it is possible to replace the jsr with a bsr.
690 If multiple function calls are all based on a single register load
691 (i.e., the same function is called multiple times), the compiler
692 guarantees that each function call will have an R_SH_USES reloc.
693 Therefore, if the linker is able to convert each R_SH_USES reloc
694 which refers to that address, it can safely eliminate the register
697 When the assembler creates an R_SH_USES reloc, it examines it to
698 determine which address is being loaded (L1 in the above example).
699 It then counts the number of references to that address, and
700 creates an R_SH_COUNT reloc at that address. The r_offset field of
701 the R_SH_COUNT reloc will be the number of references. If the
702 linker is able to eliminate a register load, it can use the
703 R_SH_COUNT reloc to see whether it can also eliminate the function
706 SH relaxing also handles another, unrelated, matter. On the SH, if
707 a load or store instruction is not aligned on a four byte boundary,
708 the memory cycle interferes with the 32 bit instruction fetch,
709 causing a one cycle bubble in the pipeline. Therefore, we try to
710 align load and store instructions on four byte boundaries if we
711 can, by swapping them with one of the adjacent instructions. */
/* Relax SEC of ABFD: convert jsr-via-register sequences flagged by
   R_SH_USES relocs into direct bsr branches where the target is in
   range, deleting the now-dead constant loads, then align load/store
   instructions.  Sets *AGAIN when another pass may find more work.
   NOTE(review): many lines of this function were lost in extraction
   (braces, else branches, goto error_return paths, several
   declarations); treat the structure below as a partial view and
   compare with upstream coff-sh.c before editing.  */
714 sh_relax_section (abfd, sec, link_info, again)
717 struct bfd_link_info *link_info;
720 struct internal_reloc *internal_relocs;
721 bfd_boolean have_code;
722 struct internal_reloc *irel, *irelend;
723 bfd_byte *contents = NULL;
727 if (link_info->relocatable
728 || (sec->flags & SEC_RELOC) == 0
729 || sec->reloc_count == 0)
732 if (coff_section_data (abfd, sec) == NULL)
734 bfd_size_type amt = sizeof (struct coff_section_tdata);
735 sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
736 if (sec->used_by_bfd == NULL)
740 internal_relocs = (_bfd_coff_read_internal_relocs
741 (abfd, sec, link_info->keep_memory,
742 (bfd_byte *) NULL, FALSE,
743 (struct internal_reloc *) NULL));
744 if (internal_relocs == NULL)
/* Main pass: scan every reloc looking for R_SH_USES entries.  */
749 irelend = internal_relocs + sec->reloc_count;
750 for (irel = internal_relocs; irel < irelend; irel++)
752 bfd_vma laddr, paddr, symval;
754 struct internal_reloc *irelfn, *irelscan, *irelcount;
755 struct internal_syment sym;
758 if (irel->r_type == R_SH_CODE)
761 if (irel->r_type != R_SH_USES)
764 /* Get the section contents. */
765 if (contents == NULL)
767 if (coff_section_data (abfd, sec)->contents != NULL)
768 contents = coff_section_data (abfd, sec)->contents;
771 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
776 /* The r_offset field of the R_SH_USES reloc will point us to
777 the register load. The 4 is because the r_offset field is
778 computed as though it were a jump offset, which are based
779 from 4 bytes after the jump instruction. */
780 laddr = irel->r_vaddr - sec->vma + 4;
781 /* Careful to sign extend the 32-bit offset. */
782 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
783 if (laddr >= sec->size)
785 (*_bfd_error_handler) ("%B: 0x%lx: warning: bad R_SH_USES offset",
786 abfd, (unsigned long) irel->r_vaddr);
789 insn = bfd_get_16 (abfd, contents + laddr);
791 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
792 if ((insn & 0xf000) != 0xd000)
794 ((*_bfd_error_handler)
795 ("%B: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
796 abfd, (unsigned long) irel->r_vaddr, insn));
800 /* Get the address from which the register is being loaded. The
801 displacement in the mov.l instruction is quadrupled. It is a
802 displacement from four bytes after the movl instruction, but,
803 before adding in the PC address, two least significant bits
804 of the PC are cleared. We assume that the section is aligned
805 on a four byte boundary. */
808 paddr += (laddr + 4) &~ (bfd_vma) 3;
809 if (paddr >= sec->size)
811 ((*_bfd_error_handler)
812 ("%B: 0x%lx: warning: bad R_SH_USES load offset",
813 abfd, (unsigned long) irel->r_vaddr));
817 /* Get the reloc for the address from which the register is
818 being loaded. This reloc will tell us which function is
819 actually being called. */
821 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
822 if (irelfn->r_vaddr == paddr
824 && (irelfn->r_type == R_SH_IMM32
825 || irelfn->r_type == R_SH_IMM32CE
826 || irelfn->r_type == R_SH_IMAGEBASE)
829 && irelfn->r_type == R_SH_IMM32
833 if (irelfn >= irelend)
835 ((*_bfd_error_handler)
836 ("%B: 0x%lx: warning: could not find expected reloc",
837 abfd, (unsigned long) paddr));
841 /* Get the value of the symbol referred to by the reloc. */
842 if (! _bfd_coff_get_external_symbols (abfd))
844 bfd_coff_swap_sym_in (abfd,
845 ((bfd_byte *) obj_coff_external_syms (abfd)
847 * bfd_coff_symesz (abfd))),
849 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
851 ((*_bfd_error_handler)
852 ("%B: 0x%lx: warning: symbol in unexpected section",
853 abfd, (unsigned long) paddr));
/* Internal (non-C_EXT) symbols can be resolved now; external ones go
   through the linker hash table.  */
857 if (sym.n_sclass != C_EXT)
859 symval = (sym.n_value
861 + sec->output_section->vma
862 + sec->output_offset);
866 struct coff_link_hash_entry *h;
868 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
869 BFD_ASSERT (h != NULL);
870 if (h->root.type != bfd_link_hash_defined
871 && h->root.type != bfd_link_hash_defweak)
873 /* This appears to be a reference to an undefined
874 symbol. Just ignore it--it will be caught by the
875 regular reloc processing. */
879 symval = (h->root.u.def.value
880 + h->root.u.def.section->output_section->vma
881 + h->root.u.def.section->output_offset);
884 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
886 /* See if this function call can be shortened. */
890 + sec->output_section->vma
/* bsr reaches +/-4096 bytes (12-bit displacement in 2-byte units).  */
893 if (foff < -0x1000 || foff >= 0x1000)
895 /* After all that work, we can't shorten this function call. */
899 /* Shorten the function call. */
901 /* For simplicity of coding, we are going to modify the section
902 contents, the section relocs, and the BFD symbol table. We
903 must tell the rest of the code not to free up this
904 information. It would be possible to instead create a table
905 of changes which have to be made, as is done in coff-mips.c;
906 that would be more work, but would require less memory when
907 the linker is run. */
909 coff_section_data (abfd, sec)->relocs = internal_relocs;
910 coff_section_data (abfd, sec)->keep_relocs = TRUE;
912 coff_section_data (abfd, sec)->contents = contents;
913 coff_section_data (abfd, sec)->keep_contents = TRUE;
915 obj_coff_keep_syms (abfd) = TRUE;
917 /* Replace the jsr with a bsr. */
919 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
920 replace the jsr with a bsr. */
921 irel->r_type = R_SH_PCDISP;
922 irel->r_symndx = irelfn->r_symndx;
923 if (sym.n_sclass != C_EXT)
925 /* If this needs to be changed because of future relaxing,
926 it will be handled here like other internal PCDISP
929 (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
930 contents + irel->r_vaddr - sec->vma);
934 /* We can't fully resolve this yet, because the external
935 symbol value may be changed by future relaxing. We let
936 the final link phase handle it. */
937 bfd_put_16 (abfd, (bfd_vma) 0xb000,
938 contents + irel->r_vaddr - sec->vma);
941 /* See if there is another R_SH_USES reloc referring to the same
943 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
944 if (irelscan->r_type == R_SH_USES
945 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
947 if (irelscan < irelend)
949 /* Some other function call depends upon this register load,
950 and we have not yet converted that function call.
951 Indeed, we may never be able to convert it. There is
952 nothing else we can do at this point. */
956 /* Look for a R_SH_COUNT reloc on the location where the
957 function address is stored. Do this before deleting any
958 bytes, to avoid confusion about the address. */
959 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
960 if (irelcount->r_vaddr == paddr
961 && irelcount->r_type == R_SH_COUNT)
964 /* Delete the register load. */
965 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
968 /* That will change things, so, just in case it permits some
969 other function call to come within range, we should relax
970 again. Note that this is not required, and it may be slow. */
973 /* Now check whether we got a COUNT reloc. */
974 if (irelcount >= irelend)
976 ((*_bfd_error_handler)
977 ("%B: 0x%lx: warning: could not find expected COUNT reloc",
978 abfd, (unsigned long) paddr));
982 /* The number of uses is stored in the r_offset field. We've
984 if (irelcount->r_offset == 0)
986 ((*_bfd_error_handler) ("%B: 0x%lx: warning: bad count",
987 abfd, (unsigned long) paddr));
991 --irelcount->r_offset;
993 /* If there are no more uses, we can delete the address. Reload
994 the address from irelfn, in case it was changed by the
995 previous call to sh_relax_delete_bytes. */
996 if (irelcount->r_offset == 0)
998 if (! sh_relax_delete_bytes (abfd, sec,
999 irelfn->r_vaddr - sec->vma, 4))
1003 /* We've done all we can with that function call. */
/* Second pass: align loads and stores on four-byte boundaries.  */
1006 /* Look for load and store instructions that we can align on four
1010 bfd_boolean swapped;
1012 /* Get the section contents. */
1013 if (contents == NULL)
1015 if (coff_section_data (abfd, sec)->contents != NULL)
1016 contents = coff_section_data (abfd, sec)->contents;
1019 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1024 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1029 coff_section_data (abfd, sec)->relocs = internal_relocs;
1030 coff_section_data (abfd, sec)->keep_relocs = TRUE;
1032 coff_section_data (abfd, sec)->contents = contents;
1033 coff_section_data (abfd, sec)->keep_contents = TRUE;
1035 obj_coff_keep_syms (abfd) = TRUE;
/* Normal-exit cleanup: free or cache the reloc and contents buffers
   depending on link_info->keep_memory.  */
1039 if (internal_relocs != NULL
1040 && internal_relocs != coff_section_data (abfd, sec)->relocs)
1042 if (! link_info->keep_memory)
1043 free (internal_relocs);
1045 coff_section_data (abfd, sec)->relocs = internal_relocs;
1048 if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1050 if (! link_info->keep_memory)
1053 /* Cache the section contents for coff_link_input_bfd. */
1054 coff_section_data (abfd, sec)->contents = contents;
/* NOTE(review): the lines below look like the error-return path
   (freeing buffers before returning FALSE) — confirm against
   upstream.  */
1060 if (internal_relocs != NULL
1061 && internal_relocs != coff_section_data (abfd, sec)->relocs)
1062 free (internal_relocs);
1063 if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1068 /* Delete some bytes from a section while relaxing. */
1071 sh_relax_delete_bytes (abfd, sec, addr, count)
1078 struct internal_reloc *irel, *irelend;
1079 struct internal_reloc *irelalign;
1081 bfd_byte *esym, *esymend;
1082 bfd_size_type symesz;
1083 struct coff_link_hash_entry **sym_hash;
1086 contents = coff_section_data (abfd, sec)->contents;
1088 /* The deletion must stop at the next ALIGN reloc for an aligment
1089 power larger than the number of bytes we are deleting. */
1094 irel = coff_section_data (abfd, sec)->relocs;
1095 irelend = irel + sec->reloc_count;
1096 for (; irel < irelend; irel++)
1098 if (irel->r_type == R_SH_ALIGN
1099 && irel->r_vaddr - sec->vma > addr
1100 && count < (1 << irel->r_offset))
1103 toaddr = irel->r_vaddr - sec->vma;
1108 /* Actually delete the bytes. */
1109 memmove (contents + addr, contents + addr + count,
1110 (size_t) (toaddr - addr - count));
1111 if (irelalign == NULL)
1117 #define NOP_OPCODE (0x0009)
1119 BFD_ASSERT ((count & 1) == 0);
1120 for (i = 0; i < count; i += 2)
1121 bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1124 /* Adjust all the relocs. */
1125 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1127 bfd_vma nraddr, stop;
1130 struct internal_syment sym;
1131 int off, adjust, oinsn;
1132 bfd_signed_vma voff = 0;
1133 bfd_boolean overflow;
1135 /* Get the new reloc address. */
1136 nraddr = irel->r_vaddr - sec->vma;
1137 if ((irel->r_vaddr - sec->vma > addr
1138 && irel->r_vaddr - sec->vma < toaddr)
1139 || (irel->r_type == R_SH_ALIGN
1140 && irel->r_vaddr - sec->vma == toaddr))
1143 /* See if this reloc was for the bytes we have deleted, in which
1144 case we no longer care about it. Don't delete relocs which
1145 represent addresses, though. */
1146 if (irel->r_vaddr - sec->vma >= addr
1147 && irel->r_vaddr - sec->vma < addr + count
1148 && irel->r_type != R_SH_ALIGN
1149 && irel->r_type != R_SH_CODE
1150 && irel->r_type != R_SH_DATA
1151 && irel->r_type != R_SH_LABEL)
1152 irel->r_type = R_SH_UNUSED;
1154 /* If this is a PC relative reloc, see if the range it covers
1155 includes the bytes we have deleted. */
1156 switch (irel->r_type)
1161 case R_SH_PCDISP8BY2:
1163 case R_SH_PCRELIMM8BY2:
1164 case R_SH_PCRELIMM8BY4:
1165 start = irel->r_vaddr - sec->vma;
1166 insn = bfd_get_16 (abfd, contents + nraddr);
1170 switch (irel->r_type)
1173 start = stop = addr;
1179 case R_SH_IMAGEBASE:
1181 /* If this reloc is against a symbol defined in this
1182 section, and the symbol will not be adjusted below, we
1183 must check the addend to see it will put the value in
1184 range to be adjusted, and hence must be changed. */
1185 bfd_coff_swap_sym_in (abfd,
1186 ((bfd_byte *) obj_coff_external_syms (abfd)
1188 * bfd_coff_symesz (abfd))),
1190 if (sym.n_sclass != C_EXT
1191 && sym.n_scnum == sec->target_index
1192 && ((bfd_vma) sym.n_value <= addr
1193 || (bfd_vma) sym.n_value >= toaddr))
1197 val = bfd_get_32 (abfd, contents + nraddr);
1199 if (val > addr && val < toaddr)
1200 bfd_put_32 (abfd, val - count, contents + nraddr);
1202 start = stop = addr;
1205 case R_SH_PCDISP8BY2:
1209 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1213 bfd_coff_swap_sym_in (abfd,
1214 ((bfd_byte *) obj_coff_external_syms (abfd)
1216 * bfd_coff_symesz (abfd))),
1218 if (sym.n_sclass == C_EXT)
1219 start = stop = addr;
1225 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1229 case R_SH_PCRELIMM8BY2:
1231 stop = start + 4 + off * 2;
1234 case R_SH_PCRELIMM8BY4:
1236 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1242 /* These relocs types represent
1244 The r_offset field holds the difference between the reloc
1245 address and L1. That is the start of the reloc, and
1246 adding in the contents gives us the top. We must adjust
1247 both the r_offset field and the section contents. */
1249 start = irel->r_vaddr - sec->vma;
1250 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1254 && (stop <= addr || stop >= toaddr))
1255 irel->r_offset += count;
1256 else if (stop > addr
1258 && (start <= addr || start >= toaddr))
1259 irel->r_offset -= count;
1263 if (irel->r_type == R_SH_SWITCH16)
1264 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1265 else if (irel->r_type == R_SH_SWITCH8)
1266 voff = bfd_get_8 (abfd, contents + nraddr);
1268 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1269 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1274 start = irel->r_vaddr - sec->vma;
1275 stop = (bfd_vma) ((bfd_signed_vma) start
1276 + (long) irel->r_offset
1283 && (stop <= addr || stop >= toaddr))
1285 else if (stop > addr
1287 && (start <= addr || start >= toaddr))
1296 switch (irel->r_type)
1302 case R_SH_PCDISP8BY2:
1303 case R_SH_PCRELIMM8BY2:
1305 if ((oinsn & 0xff00) != (insn & 0xff00))
1307 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1312 if ((oinsn & 0xf000) != (insn & 0xf000))
1314 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1317 case R_SH_PCRELIMM8BY4:
1318 BFD_ASSERT (adjust == count || count >= 4);
1323 if ((irel->r_vaddr & 3) == 0)
1326 if ((oinsn & 0xff00) != (insn & 0xff00))
1328 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1333 if (voff < 0 || voff >= 0xff)
1335 bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1340 if (voff < - 0x8000 || voff >= 0x8000)
1342 bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1347 bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1351 irel->r_offset += adjust;
1357 ((*_bfd_error_handler)
1358 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
1359 abfd, (unsigned long) irel->r_vaddr));
1360 bfd_set_error (bfd_error_bad_value);
1365 irel->r_vaddr = nraddr + sec->vma;
1368 /* Look through all the other sections. If there contain any IMM32
1369 relocs against internal symbols which we are not going to adjust
1370 below, we may need to adjust the addends. */
1371 for (o = abfd->sections; o != NULL; o = o->next)
1373 struct internal_reloc *internal_relocs;
1374 struct internal_reloc *irelscan, *irelscanend;
1375 bfd_byte *ocontents;
1378 || (o->flags & SEC_RELOC) == 0
1379 || o->reloc_count == 0)
1382 /* We always cache the relocs. Perhaps, if info->keep_memory is
1383 FALSE, we should free them, if we are permitted to, when we
1384 leave sh_coff_relax_section. */
1385 internal_relocs = (_bfd_coff_read_internal_relocs
1386 (abfd, o, TRUE, (bfd_byte *) NULL, FALSE,
1387 (struct internal_reloc *) NULL));
1388 if (internal_relocs == NULL)
1392 irelscanend = internal_relocs + o->reloc_count;
1393 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1395 struct internal_syment sym;
1398 if (irelscan->r_type != R_SH_IMM32
1399 && irelscan->r_type != R_SH_IMAGEBASE
1400 && irelscan->r_type != R_SH_IMM32CE)
1402 if (irelscan->r_type != R_SH_IMM32)
1406 bfd_coff_swap_sym_in (abfd,
1407 ((bfd_byte *) obj_coff_external_syms (abfd)
1408 + (irelscan->r_symndx
1409 * bfd_coff_symesz (abfd))),
1411 if (sym.n_sclass != C_EXT
1412 && sym.n_scnum == sec->target_index
1413 && ((bfd_vma) sym.n_value <= addr
1414 || (bfd_vma) sym.n_value >= toaddr))
1418 if (ocontents == NULL)
1420 if (coff_section_data (abfd, o)->contents != NULL)
1421 ocontents = coff_section_data (abfd, o)->contents;
1424 if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
1426 /* We always cache the section contents.
1427 Perhaps, if info->keep_memory is FALSE, we
1428 should free them, if we are permitted to,
1429 when we leave sh_coff_relax_section. */
1430 coff_section_data (abfd, o)->contents = ocontents;
1434 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1436 if (val > addr && val < toaddr)
1437 bfd_put_32 (abfd, val - count,
1438 ocontents + irelscan->r_vaddr - o->vma);
1440 coff_section_data (abfd, o)->keep_contents = TRUE;
1445 /* Adjusting the internal symbols will not work if something has
1446 already retrieved the generic symbols. It would be possible to
1447 make this work by adjusting the generic symbols at the same time.
1448 However, this case should not arise in normal usage. */
1449 if (obj_symbols (abfd) != NULL
1450 || obj_raw_syments (abfd) != NULL)
1452 ((*_bfd_error_handler)
1453 ("%B: fatal: generic symbols retrieved before relaxing", abfd));
1454 bfd_set_error (bfd_error_invalid_operation);
1458 /* Adjust all the symbols. */
1459 sym_hash = obj_coff_sym_hashes (abfd);
1460 symesz = bfd_coff_symesz (abfd);
1461 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1462 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1463 while (esym < esymend)
1465 struct internal_syment isym;
1467 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1469 if (isym.n_scnum == sec->target_index
1470 && (bfd_vma) isym.n_value > addr
1471 && (bfd_vma) isym.n_value < toaddr)
1473 isym.n_value -= count;
1475 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1477 if (*sym_hash != NULL)
1479 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1480 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1481 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1482 && (*sym_hash)->root.u.def.value < toaddr);
1483 (*sym_hash)->root.u.def.value -= count;
1487 esym += (isym.n_numaux + 1) * symesz;
1488 sym_hash += isym.n_numaux + 1;
1491 /* See if we can move the ALIGN reloc forward. We have adjusted
1492 r_vaddr for it already. */
1493 if (irelalign != NULL)
1495 bfd_vma alignto, alignaddr;
1497 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1498 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1499 1 << irelalign->r_offset);
1500 if (alignto != alignaddr)
1502 /* Tail recursion. */
1503 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1504 (int) (alignto - alignaddr));
1511 /* This is yet another version of the SH opcode table, used to rapidly
1512 get information about a particular instruction. */
1514 /* The opcode map is represented by an array of these structures. The
1515 array is indexed by the high order four bits in the instruction. */
struct sh_major_opcode
  /* A pointer to the instruction list.  This is an array which
     contains all the instructions with this major opcode.  */
  const struct sh_minor_opcode *minor_opcodes;
  /* The number of elements in minor_opcodes.  */
  unsigned short count;

/* This structure holds information for a set of SH opcodes.  The
   instruction code is anded with the mask value, and the resulting
   value is used to search the sorted opcode list.  */

struct sh_minor_opcode
  /* The sorted opcode list.  */
  const struct sh_opcode *opcodes;
  /* The number of elements in opcodes.  */
  unsigned short count;
  /* The mask value to use when searching the opcode list.  */
  unsigned short mask;

/* This structure holds information for an SH instruction.  An array
   of these structures is sorted in order by opcode.  */

  /* The code for this instruction, after it has been anded with the
     mask value in the sh_major_opcode structure.  */
  unsigned short opcode;
  /* Flags for this instruction.  */
  unsigned long flags;
/* Flags which appear in the sh_opcode structure.  Every function-like
   macro below parenthesizes its argument, so callers may safely pass
   arbitrary expressions without operator-precedence surprises.  */

/* This instruction loads a value from memory.  */
#define LOAD (0x1)

/* This instruction stores a value to memory.  */
#define STORE (0x2)

/* This instruction is a branch.  */
#define BRANCH (0x4)

/* This instruction has a delay slot.  */
#define DELAY (0x8)

/* This instruction uses the value in the register in the field at
   mask 0x0f00 of the instruction.  */
#define USES1 (0x10)
#define USES1_REG(x) (((x) & 0x0f00) >> 8)

/* This instruction uses the value in the register in the field at
   mask 0x00f0 of the instruction.  */
#define USES2 (0x20)
#define USES2_REG(x) (((x) & 0x00f0) >> 4)

/* This instruction uses the value in register 0.  */
#define USESR0 (0x40)

/* This instruction sets the value in the register in the field at
   mask 0x0f00 of the instruction.  */
#define SETS1 (0x80)
#define SETS1_REG(x) (((x) & 0x0f00) >> 8)

/* This instruction sets the value in the register in the field at
   mask 0x00f0 of the instruction.  */
#define SETS2 (0x100)
#define SETS2_REG(x) (((x) & 0x00f0) >> 4)

/* This instruction sets register 0.  */
#define SETSR0 (0x200)

/* This instruction sets a special register.  */
#define SETSSP (0x400)

/* This instruction uses a special register.  */
#define USESSP (0x800)

/* This instruction uses the floating point register in the field at
   mask 0x0f00 of the instruction.  */
#define USESF1 (0x1000)
#define USESF1_REG(x) (((x) & 0x0f00) >> 8)

/* This instruction uses the floating point register in the field at
   mask 0x00f0 of the instruction.  */
#define USESF2 (0x2000)
#define USESF2_REG(x) (((x) & 0x00f0) >> 4)

/* This instruction uses floating point register 0.  */
#define USESF0 (0x4000)

/* This instruction sets the floating point register in the field at
   mask 0x0f00 of the instruction.  */
#define SETSF1 (0x8000)
#define SETSF1_REG(x) (((x) & 0x0f00) >> 8)

/* This instruction uses the register in the field at mask 0x0300 of
   the instruction; the field maps onto r2..r5.  */
#define USESAS (0x10000)
#define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
/* This instruction implicitly uses r8 (the "@as+r8" DSP forms).  */
#define USESR8 (0x20000)
/* This instruction sets the register in the field at mask 0x0300 of
   the instruction (r2..r5).  */
#define SETSAS (0x40000)
#define SETSAS_REG(x) USESAS_REG (x)

/* Expand to an initializer pair: the table itself, then its element
   count.  */
#define MAP(a) (a), sizeof (a) / sizeof (a)[0]
#ifndef COFF_IMAGE_WITH_PE
/* Forward declarations for the instruction-analysis helpers used by
   the load/store alignment code below.  */
static bfd_boolean sh_insn_uses_reg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_sets_reg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_uses_or_sets_reg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_uses_freg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_sets_freg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_uses_or_sets_freg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insns_conflict
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
	   const struct sh_opcode *));
static bfd_boolean sh_load_use
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
	   const struct sh_opcode *));
/* The opcode maps.  Each majorN table is searched through the
   sh_opcodeN list of (table, mask) pairs; see sh_insn_info.  */

/* Major nibble 0, no operand fields (matched with mask 0xffff).  */
static const struct sh_opcode sh_opcode00[] =
  { 0x0008, SETSSP },			/* clrt */
  { 0x0009, 0 },			/* nop */
  { 0x000b, BRANCH | DELAY | USESSP },	/* rts */
  { 0x0018, SETSSP },			/* sett */
  { 0x0019, SETSSP },			/* div0u */
  { 0x001b, 0 },			/* sleep */
  { 0x0028, SETSSP },			/* clrmac */
  { 0x002b, BRANCH | DELAY | SETSSP },	/* rte */
  { 0x0038, USESSP | SETSSP },		/* ldtlb */
  { 0x0048, SETSSP },			/* clrs */
  { 0x0058, SETSSP }			/* sets */

/* Major nibble 0 with an Rn field (mask 0xf0ff).  */
static const struct sh_opcode sh_opcode01[] =
  { 0x0003, BRANCH | DELAY | USES1 | SETSSP },	/* bsrf rn */
  { 0x000a, SETS1 | USESSP },	/* sts mach,rn */
  { 0x001a, SETS1 | USESSP },	/* sts macl,rn */
  { 0x0023, BRANCH | DELAY | USES1 },	/* braf rn */
  { 0x0029, SETS1 | USESSP },	/* movt rn */
  { 0x002a, SETS1 | USESSP },	/* sts pr,rn */
  { 0x005a, SETS1 | USESSP },	/* sts fpul,rn */
  { 0x006a, SETS1 | USESSP },	/* sts fpscr,rn / sts dsr,rn */
  { 0x0083, LOAD | USES1 },	/* pref @rn */
  { 0x007a, SETS1 | USESSP },	/* sts a0,rn */
  { 0x008a, SETS1 | USESSP },	/* sts x0,rn */
  { 0x009a, SETS1 | USESSP },	/* sts x1,rn */
  { 0x00aa, SETS1 | USESSP },	/* sts y0,rn */
  { 0x00ba, SETS1 | USESSP }	/* sts y1,rn */

/* Major nibble 0 with Rn and Rm fields (mask 0xf00f).  */
static const struct sh_opcode sh_opcode02[] =
  { 0x0002, SETS1 | USESSP },	/* stc <special_reg>,rn */
  { 0x0004, STORE | USES1 | USES2 | USESR0 },	/* mov.b rm,@(r0,rn) */
  { 0x0005, STORE | USES1 | USES2 | USESR0 },	/* mov.w rm,@(r0,rn) */
  { 0x0006, STORE | USES1 | USES2 | USESR0 },	/* mov.l rm,@(r0,rn) */
  { 0x0007, SETSSP | USES1 | USES2 },		/* mul.l rm,rn */
  { 0x000c, LOAD | SETS1 | USES2 | USESR0 },	/* mov.b @(r0,rm),rn */
  { 0x000d, LOAD | SETS1 | USES2 | USESR0 },	/* mov.w @(r0,rm),rn */
  { 0x000e, LOAD | SETS1 | USES2 | USESR0 },	/* mov.l @(r0,rm),rn */
  { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP },	/* mac.l @rm+,@rn+ */

/* Search order for major nibble 0: most specific mask first.  */
static const struct sh_minor_opcode sh_opcode0[] =
  { MAP (sh_opcode00), 0xffff },
  { MAP (sh_opcode01), 0xf0ff },
  { MAP (sh_opcode02), 0xf00f }

static const struct sh_opcode sh_opcode10[] =
  { 0x1000, STORE | USES1 | USES2 }	/* mov.l rm,@(disp,rn) */

static const struct sh_minor_opcode sh_opcode1[] =
  { MAP (sh_opcode10), 0xf000 }

/* Major nibble 2: stores and register-register ALU operations.  */
static const struct sh_opcode sh_opcode20[] =
  { 0x2000, STORE | USES1 | USES2 },		/* mov.b rm,@rn */
  { 0x2001, STORE | USES1 | USES2 },		/* mov.w rm,@rn */
  { 0x2002, STORE | USES1 | USES2 },		/* mov.l rm,@rn */
  { 0x2004, STORE | SETS1 | USES1 | USES2 },	/* mov.b rm,@-rn */
  { 0x2005, STORE | SETS1 | USES1 | USES2 },	/* mov.w rm,@-rn */
  { 0x2006, STORE | SETS1 | USES1 | USES2 },	/* mov.l rm,@-rn */
  { 0x2007, SETSSP | USES1 | USES2 | USESSP },	/* div0s */
  { 0x2008, SETSSP | USES1 | USES2 },		/* tst rm,rn */
  { 0x2009, SETS1 | USES1 | USES2 },		/* and rm,rn */
  { 0x200a, SETS1 | USES1 | USES2 },		/* xor rm,rn */
  { 0x200b, SETS1 | USES1 | USES2 },		/* or rm,rn */
  { 0x200c, SETSSP | USES1 | USES2 },		/* cmp/str rm,rn */
  { 0x200d, SETS1 | USES1 | USES2 },		/* xtrct rm,rn */
  { 0x200e, SETSSP | USES1 | USES2 },		/* mulu.w rm,rn */
  { 0x200f, SETSSP | USES1 | USES2 }		/* muls.w rm,rn */

static const struct sh_minor_opcode sh_opcode2[] =
  { MAP (sh_opcode20), 0xf00f }

/* Major nibble 3: compares, multiplies, add/subtract families.  */
static const struct sh_opcode sh_opcode30[] =
  { 0x3000, SETSSP | USES1 | USES2 },		/* cmp/eq rm,rn */
  { 0x3002, SETSSP | USES1 | USES2 },		/* cmp/hs rm,rn */
  { 0x3003, SETSSP | USES1 | USES2 },		/* cmp/ge rm,rn */
  { 0x3004, SETSSP | USESSP | USES1 | USES2 },	/* div1 rm,rn */
  { 0x3005, SETSSP | USES1 | USES2 },		/* dmulu.l rm,rn */
  { 0x3006, SETSSP | USES1 | USES2 },		/* cmp/hi rm,rn */
  { 0x3007, SETSSP | USES1 | USES2 },		/* cmp/gt rm,rn */
  { 0x3008, SETS1 | USES1 | USES2 },		/* sub rm,rn */
  { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP },	/* subc rm,rn */
  { 0x300b, SETS1 | SETSSP | USES1 | USES2 },	/* subv rm,rn */
  { 0x300c, SETS1 | USES1 | USES2 },		/* add rm,rn */
  { 0x300d, SETSSP | USES1 | USES2 },		/* dmuls.l rm,rn */
  { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP },	/* addc rm,rn */
  { 0x300f, SETS1 | SETSSP | USES1 | USES2 }	/* addv rm,rn */

static const struct sh_minor_opcode sh_opcode3[] =
  { MAP (sh_opcode30), 0xf00f }

/* Major nibble 4: shifts, control-register moves, jumps (mask 0xf0ff).  */
static const struct sh_opcode sh_opcode40[] =
  { 0x4000, SETS1 | SETSSP | USES1 },		/* shll rn */
  { 0x4001, SETS1 | SETSSP | USES1 },		/* shlr rn */
  { 0x4002, STORE | SETS1 | USES1 | USESSP },	/* sts.l mach,@-rn */
  { 0x4004, SETS1 | SETSSP | USES1 },		/* rotl rn */
  { 0x4005, SETS1 | SETSSP | USES1 },		/* rotr rn */
  { 0x4006, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,mach */
  { 0x4008, SETS1 | USES1 },			/* shll2 rn */
  { 0x4009, SETS1 | USES1 },			/* shlr2 rn */
  { 0x400a, SETSSP | USES1 },			/* lds rm,mach */
  { 0x400b, BRANCH | DELAY | USES1 },		/* jsr @rn */
  { 0x4010, SETS1 | SETSSP | USES1 },		/* dt rn */
  { 0x4011, SETSSP | USES1 },			/* cmp/pz rn */
  { 0x4012, STORE | SETS1 | USES1 | USESSP },	/* sts.l macl,@-rn */
  { 0x4014, SETSSP | USES1 },			/* setrc rm */
  { 0x4015, SETSSP | USES1 },			/* cmp/pl rn */
  { 0x4016, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,macl */
  { 0x4018, SETS1 | USES1 },			/* shll8 rn */
  { 0x4019, SETS1 | USES1 },			/* shlr8 rn */
  { 0x401a, SETSSP | USES1 },			/* lds rm,macl */
  { 0x401b, LOAD | SETSSP | USES1 },		/* tas.b @rn */
  { 0x4020, SETS1 | SETSSP | USES1 },		/* shal rn */
  { 0x4021, SETS1 | SETSSP | USES1 },		/* shar rn */
  { 0x4022, STORE | SETS1 | USES1 | USESSP },	/* sts.l pr,@-rn */
  { 0x4024, SETS1 | SETSSP | USES1 | USESSP },	/* rotcl rn */
  { 0x4025, SETS1 | SETSSP | USES1 | USESSP },	/* rotcr rn */
  { 0x4026, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,pr */
  { 0x4028, SETS1 | USES1 },			/* shll16 rn */
  { 0x4029, SETS1 | USES1 },			/* shlr16 rn */
  { 0x402a, SETSSP | USES1 },			/* lds rm,pr */
  { 0x402b, BRANCH | DELAY | USES1 },		/* jmp @rn */
  { 0x4052, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpul,@-rn */
  { 0x4056, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpul */
  { 0x405a, SETSSP | USES1 },			/* lds.l rm,fpul */
  { 0x4062, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpscr / dsr,@-rn */
  { 0x4066, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpscr / dsr */
  { 0x406a, SETSSP | USES1 },			/* lds rm,fpscr / lds rm,dsr */
  { 0x4072, STORE | SETS1 | USES1 | USESSP },	/* sts.l a0,@-rn */
  { 0x4076, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,a0 */
  { 0x407a, SETSSP | USES1 },			/* lds.l rm,a0 */
  { 0x4082, STORE | SETS1 | USES1 | USESSP },	/* sts.l x0,@-rn */
  { 0x4086, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x0 */
  { 0x408a, SETSSP | USES1 },			/* lds.l rm,x0 */
  { 0x4092, STORE | SETS1 | USES1 | USESSP },	/* sts.l x1,@-rn */
  { 0x4096, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x1 */
  { 0x409a, SETSSP | USES1 },			/* lds.l rm,x1 */
  { 0x40a2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y0,@-rn */
  { 0x40a6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y0 */
  { 0x40aa, SETSSP | USES1 },			/* lds.l rm,y0 */
  { 0x40b2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y1,@-rn */
  { 0x40b6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y1 */
  { 0x40ba, SETSSP | USES1 }			/* lds.l rm,y1 */

/* Major nibble 4 with Rn and Rm fields (mask 0xf00f).  */
static const struct sh_opcode sh_opcode41[] =
  { 0x4003, STORE | SETS1 | USES1 | USESSP },	/* stc.l <special_reg>,@-rn */
  { 0x4007, LOAD | SETS1 | SETSSP | USES1 },	/* ldc.l @rm+,<special_reg> */
  { 0x400c, SETS1 | USES1 | USES2 },		/* shad rm,rn */
  { 0x400d, SETS1 | USES1 | USES2 },		/* shld rm,rn */
  { 0x400e, SETSSP | USES1 },			/* ldc rm,<special_reg> */
  { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP },	/* mac.w @rm+,@rn+ */

static const struct sh_minor_opcode sh_opcode4[] =
  { MAP (sh_opcode40), 0xf0ff },
  { MAP (sh_opcode41), 0xf00f }
static const struct sh_opcode sh_opcode50[] =
  { 0x5000, LOAD | SETS1 | USES2 }	/* mov.l @(disp,rm),rn */

static const struct sh_minor_opcode sh_opcode5[] =
  { MAP (sh_opcode50), 0xf000 }

/* Major nibble 6: loads, moves and single-operand ALU ops.  */
static const struct sh_opcode sh_opcode60[] =
  { 0x6000, LOAD | SETS1 | USES2 },		/* mov.b @rm,rn */
  { 0x6001, LOAD | SETS1 | USES2 },		/* mov.w @rm,rn */
  { 0x6002, LOAD | SETS1 | USES2 },		/* mov.l @rm,rn */
  { 0x6003, SETS1 | USES2 },			/* mov rm,rn */
  { 0x6004, LOAD | SETS1 | SETS2 | USES2 },	/* mov.b @rm+,rn */
  { 0x6005, LOAD | SETS1 | SETS2 | USES2 },	/* mov.w @rm+,rn */
  { 0x6006, LOAD | SETS1 | SETS2 | USES2 },	/* mov.l @rm+,rn */
  { 0x6007, SETS1 | USES2 },			/* not rm,rn */
  { 0x6008, SETS1 | USES2 },			/* swap.b rm,rn */
  { 0x6009, SETS1 | USES2 },			/* swap.w rm,rn */
  { 0x600a, SETS1 | SETSSP | USES2 | USESSP },	/* negc rm,rn */
  { 0x600b, SETS1 | USES2 },			/* neg rm,rn */
  { 0x600c, SETS1 | USES2 },			/* extu.b rm,rn */
  { 0x600d, SETS1 | USES2 },			/* extu.w rm,rn */
  { 0x600e, SETS1 | USES2 },			/* exts.b rm,rn */
  { 0x600f, SETS1 | USES2 }			/* exts.w rm,rn */

static const struct sh_minor_opcode sh_opcode6[] =
  { MAP (sh_opcode60), 0xf00f }

static const struct sh_opcode sh_opcode70[] =
  { 0x7000, SETS1 | USES1 }	/* add #imm,rn */

static const struct sh_minor_opcode sh_opcode7[] =
  { MAP (sh_opcode70), 0xf000 }

/* Major nibble 8: displacement moves and conditional branches.  */
static const struct sh_opcode sh_opcode80[] =
  { 0x8000, STORE | USES2 | USESR0 },	/* mov.b r0,@(disp,rn) */
  { 0x8100, STORE | USES2 | USESR0 },	/* mov.w r0,@(disp,rn) */
  { 0x8200, SETSSP },			/* setrc #imm */
  { 0x8400, LOAD | SETSR0 | USES2 },	/* mov.b @(disp,rm),r0 */
  { 0x8500, LOAD | SETSR0 | USES2 },	/* mov.w @(disp,rn),r0 */
  { 0x8800, SETSSP | USESR0 },		/* cmp/eq #imm,r0 */
  { 0x8900, BRANCH | USESSP },		/* bt label */
  { 0x8b00, BRANCH | USESSP },		/* bf label */
  { 0x8c00, SETSSP },			/* ldrs @(disp,pc) */
  { 0x8d00, BRANCH | DELAY | USESSP },	/* bt/s label */
  { 0x8e00, SETSSP },			/* ldre @(disp,pc) */
  { 0x8f00, BRANCH | DELAY | USESSP }	/* bf/s label */

static const struct sh_minor_opcode sh_opcode8[] =
  { MAP (sh_opcode80), 0xff00 }

static const struct sh_opcode sh_opcode90[] =
  { 0x9000, LOAD | SETS1 }	/* mov.w @(disp,pc),rn */

static const struct sh_minor_opcode sh_opcode9[] =
  { MAP (sh_opcode90), 0xf000 }

static const struct sh_opcode sh_opcodea0[] =
  { 0xa000, BRANCH | DELAY }	/* bra label */

static const struct sh_minor_opcode sh_opcodea[] =
  { MAP (sh_opcodea0), 0xf000 }

static const struct sh_opcode sh_opcodeb0[] =
  { 0xb000, BRANCH | DELAY }	/* bsr label */

static const struct sh_minor_opcode sh_opcodeb[] =
  { MAP (sh_opcodeb0), 0xf000 }

/* Major nibble c: GBR-relative moves, traps and immediate logic.  */
static const struct sh_opcode sh_opcodec0[] =
  { 0xc000, STORE | USESR0 | USESSP },	/* mov.b r0,@(disp,gbr) */
  { 0xc100, STORE | USESR0 | USESSP },	/* mov.w r0,@(disp,gbr) */
  { 0xc200, STORE | USESR0 | USESSP },	/* mov.l r0,@(disp,gbr) */
  { 0xc300, BRANCH | USESSP },		/* trapa #imm */
  { 0xc400, LOAD | SETSR0 | USESSP },	/* mov.b @(disp,gbr),r0 */
  { 0xc500, LOAD | SETSR0 | USESSP },	/* mov.w @(disp,gbr),r0 */
  { 0xc600, LOAD | SETSR0 | USESSP },	/* mov.l @(disp,gbr),r0 */
  { 0xc700, SETSR0 },			/* mova @(disp,pc),r0 */
  { 0xc800, SETSSP | USESR0 },		/* tst #imm,r0 */
  { 0xc900, SETSR0 | USESR0 },		/* and #imm,r0 */
  { 0xca00, SETSR0 | USESR0 },		/* xor #imm,r0 */
  { 0xcb00, SETSR0 | USESR0 },		/* or #imm,r0 */
  { 0xcc00, LOAD | SETSSP | USESR0 | USESSP },	/* tst.b #imm,@(r0,gbr) */
  { 0xcd00, LOAD | STORE | USESR0 | USESSP },	/* and.b #imm,@(r0,gbr) */
  { 0xce00, LOAD | STORE | USESR0 | USESSP },	/* xor.b #imm,@(r0,gbr) */
  { 0xcf00, LOAD | STORE | USESR0 | USESSP }	/* or.b #imm,@(r0,gbr) */

static const struct sh_minor_opcode sh_opcodec[] =
  { MAP (sh_opcodec0), 0xff00 }

static const struct sh_opcode sh_opcoded0[] =
  { 0xd000, LOAD | SETS1 }	/* mov.l @(disp,pc),rn */

static const struct sh_minor_opcode sh_opcoded[] =
  { MAP (sh_opcoded0), 0xf000 }

static const struct sh_opcode sh_opcodee0[] =
  { 0xe000, SETS1 }	/* mov #imm,rn */

static const struct sh_minor_opcode sh_opcodee[] =
  { MAP (sh_opcodee0), 0xf000 }
/* Major nibble f: floating point operations (mask 0xf00f).  */
static const struct sh_opcode sh_opcodef0[] =
  { 0xf000, SETSF1 | USESF1 | USESF2 },		/* fadd fm,fn */
  { 0xf001, SETSF1 | USESF1 | USESF2 },		/* fsub fm,fn */
  { 0xf002, SETSF1 | USESF1 | USESF2 },		/* fmul fm,fn */
  { 0xf003, SETSF1 | USESF1 | USESF2 },		/* fdiv fm,fn */
  { 0xf004, SETSSP | USESF1 | USESF2 },		/* fcmp/eq fm,fn */
  { 0xf005, SETSSP | USESF1 | USESF2 },		/* fcmp/gt fm,fn */
  { 0xf006, LOAD | SETSF1 | USES2 | USESR0 },	/* fmov.s @(r0,rm),fn */
  { 0xf007, STORE | USES1 | USESF2 | USESR0 },	/* fmov.s fm,@(r0,rn) */
  { 0xf008, LOAD | SETSF1 | USES2 },		/* fmov.s @rm,fn */
  { 0xf009, LOAD | SETS2 | SETSF1 | USES2 },	/* fmov.s @rm+,fn */
  { 0xf00a, STORE | USES1 | USESF2 },		/* fmov.s fm,@rn */
  { 0xf00b, STORE | SETS1 | USES1 | USESF2 },	/* fmov.s fm,@-rn */
  { 0xf00c, SETSF1 | USESF2 },			/* fmov fm,fn */
  { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 }	/* fmac f0,fm,fn */

/* Major nibble f, single-FP-register forms (mask 0xf0ff).  */
static const struct sh_opcode sh_opcodef1[] =
  { 0xf00d, SETSF1 | USESSP },	/* fsts fpul,fn */
  { 0xf01d, SETSSP | USESF1 },	/* flds fn,fpul */
  { 0xf02d, SETSF1 | USESSP },	/* float fpul,fn */
  { 0xf03d, SETSSP | USESF1 },	/* ftrc fn,fpul */
  { 0xf04d, SETSF1 | USESF1 },	/* fneg fn */
  { 0xf05d, SETSF1 | USESF1 },	/* fabs fn */
  { 0xf06d, SETSF1 | USESF1 },	/* fsqrt fn */
  { 0xf07d, SETSSP | USESF1 },	/* ftst/nan fn */
  { 0xf08d, SETSF1 },		/* fldi0 fn */
  { 0xf09d, SETSF1 }		/* fldi1 fn */

static const struct sh_minor_opcode sh_opcodef[] =
  { MAP (sh_opcodef0), 0xf00f },
  { MAP (sh_opcodef1), 0xf0ff }

/* The major table, indexed by the top four bits of the instruction.
   Not const: the 0xf entry is patched to the DSP tables when linking
   sh-dsp code (see _bfd_sh_align_load_span below).  */
static struct sh_major_opcode sh_opcodes[] =
  { MAP (sh_opcode0) },
  { MAP (sh_opcode1) },
  { MAP (sh_opcode2) },
  { MAP (sh_opcode3) },
  { MAP (sh_opcode4) },
  { MAP (sh_opcode5) },
  { MAP (sh_opcode6) },
  { MAP (sh_opcode7) },
  { MAP (sh_opcode8) },
  { MAP (sh_opcode9) },
  { MAP (sh_opcodea) },
  { MAP (sh_opcodeb) },
  { MAP (sh_opcodec) },
  { MAP (sh_opcoded) },
  { MAP (sh_opcodee) },
  { MAP (sh_opcodef) }

/* The double data transfer / parallel processing insns are not
   described here.  This will cause sh_align_load_span to leave them alone.  */

static const struct sh_opcode sh_dsp_opcodef0[] =
  { 0xf400, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @-as,ds */
  { 0xf401, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@-as */
  { 0xf404, USESAS | LOAD | SETSSP },		/* movs.x @as,ds */
  { 0xf405, USESAS | STORE | USESSP },		/* movs.x ds,@as */
  { 0xf408, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @as+,ds */
  { 0xf409, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@as+ */
  { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 },	/* movs.x @as+r8,ds */
  { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 }	/* movs.x ds,@as+r8 */

static const struct sh_minor_opcode sh_dsp_opcodef[] =
  { MAP (sh_dsp_opcodef0), 0xfc0d }
/* Given an instruction, return a pointer to the corresponding
   sh_opcode structure.  Return NULL if the instruction is not
   recognized.  */

static const struct sh_opcode *
  const struct sh_major_opcode *maj;
  const struct sh_minor_opcode *min, *minend;

  /* The major table is indexed by the top four bits of the insn.  */
  maj = &sh_opcodes[(insn & 0xf000) >> 12];
  min = maj->minor_opcodes;
  minend = min + maj->count;
  for (; min < minend; min++)
      const struct sh_opcode *op, *opend;

      /* Mask off the operand fields before searching this list.  */
      l = insn & min->mask;
      opend = op + min->count;

      /* Since the opcodes tables are sorted, we could use a binary
	 search here if the count were above some cutoff value.  */
      for (; op < opend; op++)
	if (op->opcode == l)
/* See whether an instruction uses or sets a general purpose register.
   Returns TRUE if REG is either read or written by INSN.  */

sh_insn_uses_or_sets_reg (insn, op, reg)
     const struct sh_opcode *op;
  if (sh_insn_uses_reg (insn, op, reg))

  return sh_insn_sets_reg (insn, op, reg);
/* See whether an instruction uses a general purpose register.  */

sh_insn_uses_reg (insn, op, reg)
     const struct sh_opcode *op;
  /* Register encoded in the 0x0f00 field.  */
  if ((f & USES1) != 0
      && USES1_REG (insn) == reg)
  /* Register encoded in the 0x00f0 field.  */
  if ((f & USES2) != 0
      && USES2_REG (insn) == reg)
  /* Implicit use of r0.  */
  if ((f & USESR0) != 0
  /* DSP "as" address register field (maps to r2..r5).  */
  if ((f & USESAS) && reg == USESAS_REG (insn))
  /* DSP "@as+r8" forms implicitly use r8.  */
  if ((f & USESR8) && reg == 8)
/* See whether an instruction sets a general purpose register.  */

sh_insn_sets_reg (insn, op, reg)
     const struct sh_opcode *op;
  /* Register encoded in the 0x0f00 field.  */
  if ((f & SETS1) != 0
      && SETS1_REG (insn) == reg)
  /* Register encoded in the 0x00f0 field.  */
  if ((f & SETS2) != 0
      && SETS2_REG (insn) == reg)
  /* Implicit set of r0.  */
  if ((f & SETSR0) != 0
  /* DSP "as" address register field (maps to r2..r5).  */
  if ((f & SETSAS) && reg == SETSAS_REG (insn))
/* See whether an instruction uses or sets a floating point register.
   Returns TRUE if REG is either read or written by INSN.  */

sh_insn_uses_or_sets_freg (insn, op, reg)
     const struct sh_opcode *op;
  if (sh_insn_uses_freg (insn, op, reg))

  return sh_insn_sets_freg (insn, op, reg);
/* See whether an instruction uses a floating point register.  */

sh_insn_uses_freg (insn, op, freg)
     const struct sh_opcode *op;
  /* We can't tell if this is a double-precision insn, so just play safe
     and assume that it might be.  So we not only test FREG against
     itself, but also an even FREG against FREG+1 - in case the using
     insn uses just the low part of a double precision value - and an
     odd FREG against FREG-1 - in case the setting insn sets just the
     low part of a double precision value.
     So what this all boils down to is that we have to ignore the lowest
     bit of the register number.  */

  if ((f & USESF1) != 0
      && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
  if ((f & USESF2) != 0
      && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
  /* Implicit use of fr0 (fmac).  */
  if ((f & USESF0) != 0
/* See whether an instruction sets a floating point register.  */

sh_insn_sets_freg (insn, op, freg)
     const struct sh_opcode *op;
  /* We can't tell if this is a double-precision insn, so just play safe
     and assume that it might be.  So we not only test FREG against
     itself, but also an even FREG against FREG+1 - in case the using
     insn uses just the low part of a double precision value - and an
     odd FREG against FREG-1 - in case the setting insn sets just the
     low part of a double precision value.
     So what this all boils down to is that we have to ignore the lowest
     bit of the register number.  */

  if ((f & SETSF1) != 0
      && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
/* See whether instructions I1 and I2 conflict, assuming I1 comes
   before I2.  OP1 and OP2 are the corresponding sh_opcode structures.
   This should return TRUE if there is a conflict, or FALSE if the
   instructions can be swapped safely.  */

sh_insns_conflict (i1, op1, i2, op2)
     const struct sh_opcode *op1;
     const struct sh_opcode *op2;
  unsigned int f1, f2;

  /* Load of fpscr conflicts with floating point operations.
     FIXME: shouldn't test raw opcodes here.  */
  if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
      || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))

  /* Never reorder around branches or delay slots.  */
  if ((f1 & (BRANCH | DELAY)) != 0
      || (f2 & (BRANCH | DELAY)) != 0)

  /* If both touch a special register and at least one writes one,
     they conflict.  */
  if (((f1 | f2) & SETSSP)
      && (f1 & (SETSSP | USESSP))
      && (f2 & (SETSSP | USESSP)))

  /* Any register written by I1 must not be read or written by I2.  */
  if ((f1 & SETS1) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
  if ((f1 & SETS2) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
  if ((f1 & SETSR0) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, 0))
      && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
  if ((f1 & SETSF1) != 0
      && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))

  /* And symmetrically: anything written by I2 must not be touched
     by I1.  */
  if ((f2 & SETS1) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
  if ((f2 & SETS2) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
  if ((f2 & SETSR0) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, 0))
      && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
  if ((f2 & SETSF1) != 0
      && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))

  /* The instructions do not conflict.  */
/* I1 is a load instruction, and I2 is some other instruction.  Return
   TRUE if I1 loads a register which I2 uses.  */

sh_load_use (i1, op1, i2, op2)
     const struct sh_opcode *op1;
     const struct sh_opcode *op2;
  /* Only loads can create a load-use dependency.  */
  if ((f1 & LOAD) == 0)

  /* If both SETS1 and SETSSP are set, that means a load to a special
     register using postincrement addressing mode, which we don't care
     about.  */
  if ((f1 & SETS1) != 0
      && (f1 & SETSSP) == 0
      && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))

  /* Load into r0.  */
  if ((f1 & SETSR0) != 0
      && sh_insn_uses_reg (i2, op2, 0))

  /* Load into a floating point register.  */
  if ((f1 & SETSF1) != 0
      && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2329 /* Try to align loads and stores within a span of memory. This is
2330 called by both the ELF and the COFF sh targets. ABFD and SEC are
2331 the BFD and section we are examining. CONTENTS is the contents of
2332 the section. SWAP is the routine to call to swap two instructions.
2333 RELOCS is a pointer to the internal relocation information, to be
2334 passed to SWAP. PLABEL is a pointer to the current label in a
2335 sorted list of labels; LABEL_END is the end of the list. START and
2336 STOP are the range of memory to examine. If a swap is made,
2337 *PSWAPPED is set to TRUE. */
2343 _bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
2344 plabel, label_end, start, stop, pswapped)
2348 bfd_boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
2354 bfd_boolean *pswapped;
2356 int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2357 || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2360 /* The SH4 has a Harvard architecture, hence aligning loads is not
2361 desirable. In fact, it is counter-productive, since it interferes
2362 with the schedules generated by the compiler. */
2363 if (abfd->arch_info->mach == bfd_mach_sh4)
2366 /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2370 sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2371 sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef;
2374 /* Instructions should be aligned on 2 byte boundaries. */
2375 if ((start & 1) == 1)
2378 /* Now look through the unaligned addresses. */
2382 for (; i < stop; i += 4)
2385 const struct sh_opcode *op;
2386 unsigned int prev_insn = 0;
2387 const struct sh_opcode *prev_op = NULL;
2389 insn = bfd_get_16 (abfd, contents + i);
2390 op = sh_insn_info (insn);
2392 || (op->flags & (LOAD | STORE)) == 0)
2395 /* This is a load or store which is not on a four byte boundary. */
2397 while (*plabel < label_end && **plabel < i)
2402 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2403 /* If INSN is the field b of a parallel processing insn, it is not
2404 a load / store after all. Note that the test here might mistake
2405 the field_b of a pcopy insn for the starting code of a parallel
2406 processing insn; this might miss a swapping opportunity, but at
2407 least we're on the safe side. */
2408 if (dsp && (prev_insn & 0xfc00) == 0xf800)
2411 /* Check if prev_insn is actually the field b of a parallel
2412 processing insn. Again, this can give a spurious match
2414 if (dsp && i - 2 > start)
2416 unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2418 if ((pprev_insn & 0xfc00) == 0xf800)
2421 prev_op = sh_insn_info (prev_insn);
2424 prev_op = sh_insn_info (prev_insn);
2426 /* If the load/store instruction is in a delay slot, we
2429 || (prev_op->flags & DELAY) != 0)
2433 && (*plabel >= label_end || **plabel != i)
2435 && (prev_op->flags & (LOAD | STORE)) == 0
2436 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2440 /* The load/store instruction does not have a label, and
2441 there is a previous instruction; PREV_INSN is not
2442 itself a load/store instruction, and PREV_INSN and
2443 INSN do not conflict. */
2449 unsigned int prev2_insn;
2450 const struct sh_opcode *prev2_op;
2452 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2453 prev2_op = sh_insn_info (prev2_insn);
2455 /* If the instruction before PREV_INSN has a delay
2456 slot--that is, PREV_INSN is in a delay slot--we
2458 if (prev2_op == NULL
2459 || (prev2_op->flags & DELAY) != 0)
2462 /* If the instruction before PREV_INSN is a load,
2463 and it sets a register which INSN uses, then
2464 putting INSN immediately after PREV_INSN will
2465 cause a pipeline bubble, so there is no point to
2468 && (prev2_op->flags & LOAD) != 0
2469 && sh_load_use (prev2_insn, prev2_op, insn, op))
2475 if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2482 while (*plabel < label_end && **plabel < i + 2)
2486 && (*plabel >= label_end || **plabel != i + 2))
2488 unsigned int next_insn;
2489 const struct sh_opcode *next_op;
2491 /* There is an instruction after the load/store
2492 instruction, and it does not have a label. */
2493 next_insn = bfd_get_16 (abfd, contents + i + 2);
2494 next_op = sh_insn_info (next_insn);
2496 && (next_op->flags & (LOAD | STORE)) == 0
2497 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2501 /* NEXT_INSN is not itself a load/store instruction,
2502 and it does not conflict with INSN. */
2506 /* If PREV_INSN is a load, and it sets a register
2507 which NEXT_INSN uses, then putting NEXT_INSN
2508 immediately after PREV_INSN will cause a pipeline
2509 bubble, so there is no reason to make this swap. */
2511 && (prev_op->flags & LOAD) != 0
2512 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2515 /* If INSN is a load, and it sets a register which
2516 the insn after NEXT_INSN uses, then doing the
2517 swap will cause a pipeline bubble, so there is no
2518 reason to make the swap. However, if the insn
2519 after NEXT_INSN is itself a load or store
2520 instruction, then it is misaligned, so
2521 optimistically hope that it will be swapped
2522 itself, and just live with the pipeline bubble if
2526 && (op->flags & LOAD) != 0)
2528 unsigned int next2_insn;
2529 const struct sh_opcode *next2_op;
2531 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2532 next2_op = sh_insn_info (next2_insn);
2533 if (next2_op == NULL
2534 || ((next2_op->flags & (LOAD | STORE)) == 0
2535 && sh_load_use (insn, op, next2_insn, next2_op)))
2541 if (! (*swap) (abfd, sec, relocs, contents, i))
2552 #endif /* not COFF_IMAGE_WITH_PE */
2554 /* Look for loads and stores which we can align to four byte
2555 boundaries. See the longer comment above sh_relax_section for why
2556 this is desirable. This sets *PSWAPPED if some instruction was
2560 sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
2563 struct internal_reloc *internal_relocs;
2565 bfd_boolean *pswapped;
2567 struct internal_reloc *irel, *irelend;
2568 bfd_vma *labels = NULL;
2569 bfd_vma *label, *label_end;
2574 irelend = internal_relocs + sec->reloc_count;
2576 /* Get all the addresses with labels on them. */
/* Allocate one bfd_vma per reloc; only R_SH_LABEL entries are stored,
   so LABEL_END may end up well short of the allocation.  */
2577 amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2578 labels = (bfd_vma *) bfd_malloc (amt);
2582 for (irel = internal_relocs; irel < irelend; irel++)
2584 if (irel->r_type == R_SH_LABEL)
/* Store the label as a section-relative offset.  */
2586 *label_end = irel->r_vaddr - sec->vma;
2591 /* Note that the assembler currently always outputs relocs in
2592 address order. If that ever changes, this code will need to sort
2593 the label values and the relocs. */
/* Scan for R_SH_CODE..R_SH_DATA spans; each such span is a run of
   instructions that _bfd_sh_align_load_span may reorder.  */
2597 for (irel = internal_relocs; irel < irelend; irel++)
2599 bfd_vma start, stop;
2601 if (irel->r_type != R_SH_CODE)
2604 start = irel->r_vaddr - sec->vma;
2606 for (irel++; irel < irelend; irel++)
2607 if (irel->r_type == R_SH_DATA)
2610 stop = irel->r_vaddr - sec->vma;
/* Delegate to the shared ELF/COFF aligner, using sh_swap_insns as
   the instruction-swap callback.  */
2614 if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2615 (PTR) internal_relocs, &label,
2616 label_end, start, stop, pswapped))
2630 /* Swap two SH instructions. */
/* Exchanges the 16-bit instructions at ADDR and ADDR + 2 in CONTENTS,
   then walks SEC's relocs adjusting addresses and PC-relative fields so
   the relocations still apply to the moved instructions.  */
2633 sh_swap_insns (abfd, sec, relocs, contents, addr)
2640 struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2641 unsigned short i1, i2;
2642 struct internal_reloc *irel, *irelend;
2644 /* Swap the instructions themselves. */
2645 i1 = bfd_get_16 (abfd, contents + addr);
2646 i2 = bfd_get_16 (abfd, contents + addr + 2);
2647 bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2648 bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2650 /* Adjust all reloc addresses. */
2651 irelend = internal_relocs + sec->reloc_count;
2652 for (irel = internal_relocs; irel < irelend; irel++)
2656 /* There are a few special types of relocs that we don't want to
2657 adjust. These relocs do not apply to the instruction itself,
2658 but are only associated with the address. */
2659 type = irel->r_type;
2660 if (type == R_SH_ALIGN
2661 || type == R_SH_CODE
2662 || type == R_SH_DATA
2663 || type == R_SH_LABEL)
2666 /* If an R_SH_USES reloc points to one of the addresses being
2667 swapped, we must adjust it. It would be incorrect to do this
2668 for a jump, though, since we want to execute both
2669 instructions after the jump. (We have avoided swapping
2670 around a label, so the jump will not wind up executing an
2671 instruction it shouldn't). */
2672 if (type == R_SH_USES)
/* OFF is the section-relative address the R_SH_USES entry points
   at: reloc address + 4 (PC bias) + stored offset.  */
2676 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2678 irel->r_offset += 2;
2679 else if (off == addr + 2)
2680 irel->r_offset -= 2;
/* A reloc attached to either swapped slot moves with its insn.  */
2683 if (irel->r_vaddr - sec->vma == addr)
2688 else if (irel->r_vaddr - sec->vma == addr + 2)
2699 unsigned short insn, oinsn;
2700 bfd_boolean overflow;
2702 loc = contents + irel->r_vaddr - sec->vma;
/* PC-relative fields change by one insn slot when the insn moves;
   OINSN keeps the original so overflow can be detected by comparing
   the unchanged opcode bits afterwards.  */
2709 case R_SH_PCDISP8BY2:
2710 case R_SH_PCRELIMM8BY2:
2711 insn = bfd_get_16 (abfd, loc);
2714 if ((oinsn & 0xff00) != (insn & 0xff00))
2716 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2720 insn = bfd_get_16 (abfd, loc);
/* 12-bit displacement form: only the top 4 opcode bits are fixed.  */
2723 if ((oinsn & 0xf000) != (insn & 0xf000))
2725 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2728 case R_SH_PCRELIMM8BY4:
2729 /* This reloc ignores the least significant 3 bits of
2730 the program counter before adding in the offset.
2731 This means that if ADDR is at an even address, the
2732 swap will not affect the offset. If ADDR is an at an
2733 odd address, then the instruction will be crossing a
2734 four byte boundary, and must be adjusted. */
2735 if ((addr & 3) != 0)
2737 insn = bfd_get_16 (abfd, loc);
2740 if ((oinsn & 0xff00) != (insn & 0xff00))
2742 bfd_put_16 (abfd, (bfd_vma) insn, loc);
/* An adjusted displacement that no longer fits its field is fatal:
   report and fail rather than emit a wrong branch.  */
2750 ((*_bfd_error_handler)
2751 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
2752 abfd, (unsigned long) irel->r_vaddr));
2753 bfd_set_error (bfd_error_bad_value);
2762 /* This is a modification of _bfd_coff_generic_relocate_section, which
2763 will handle SH relaxing. */
2766 sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2767 relocs, syms, sections)
2768 bfd *output_bfd ATTRIBUTE_UNUSED;
2769 struct bfd_link_info *info;
2771 asection *input_section;
2773 struct internal_reloc *relocs;
2774 struct internal_syment *syms;
2775 asection **sections;
2777 struct internal_reloc *rel;
2778 struct internal_reloc *relend;
2781 relend = rel + input_section->reloc_count;
2782 for (; rel < relend; rel++)
2785 struct coff_link_hash_entry *h;
2786 struct internal_syment *sym;
2789 reloc_howto_type *howto;
2790 bfd_reloc_status_type rstat;
2792 /* Almost all relocs have to do with relaxing. If any work must
2793 be done for them, it has been done in sh_relax_section. */
2794 if (rel->r_type != R_SH_IMM32
2796 && rel->r_type != R_SH_IMM32CE
2797 && rel->r_type != R_SH_IMAGEBASE
2799 && rel->r_type != R_SH_PCDISP)
2802 symndx = rel->r_symndx;
/* Reject out-of-range symbol indices from a corrupt object file.  */
2812 || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2814 (*_bfd_error_handler)
2815 ("%B: illegal symbol index %ld in relocs",
2817 bfd_set_error (bfd_error_bad_value);
2820 h = obj_coff_sym_hashes (input_bfd)[symndx];
2821 sym = syms + symndx;
/* COFF section symbols have their value folded into the addend;
   subtract it so the section VMA can be added back below.  */
2824 if (sym != NULL && sym->n_scnum != 0)
2825 addend = - sym->n_value;
2829 if (rel->r_type == R_SH_PCDISP)
2832 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2835 howto = &sh_coff_howtos[rel->r_type];
2839 bfd_set_error (bfd_error_bad_value);
/* PE image-base-relative reloc: convert to an RVA by removing the
   image base from the addend.  */
2844 if (rel->r_type == R_SH_IMAGEBASE)
2845 addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2854 /* There is nothing to do for an internal PCDISP reloc. */
2855 if (rel->r_type == R_SH_PCDISP)
2860 sec = bfd_abs_section_ptr;
/* Local symbol: resolve through the SECTIONS map built by the
   caller for this input BFD.  */
2865 sec = sections[symndx];
2866 val = (sec->output_section->vma
2867 + sec->output_offset
/* Global symbol: use the hash-table definition if there is one;
   otherwise report it as undefined (unless emitting relocatable
   output).  */
2874 if (h->root.type == bfd_link_hash_defined
2875 || h->root.type == bfd_link_hash_defweak)
2879 sec = h->root.u.def.section;
2880 val = (h->root.u.def.value
2881 + sec->output_section->vma
2882 + sec->output_offset);
2884 else if (! info->relocatable)
2886 if (! ((*info->callbacks->undefined_symbol)
2887 (info, h->root.root.string, input_bfd, input_section,
2888 rel->r_vaddr - input_section->vma, TRUE)))
2893 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2895 rel->r_vaddr - input_section->vma,
2904 case bfd_reloc_overflow:
2907 char buf[SYMNMLEN + 1];
/* Recover a printable symbol name: long names live in the string
   table (_n_offset), short names are inline and may lack a NUL, so
   copy into a bounded, explicitly terminated buffer.  */
2913 else if (sym->_n._n_n._n_zeroes == 0
2914 && sym->_n._n_n._n_offset != 0)
2915 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2918 strncpy (buf, sym->_n._n_name, SYMNMLEN);
2919 buf[SYMNMLEN] = '\0';
2923 if (! ((*info->callbacks->reloc_overflow)
2924 (info, (h ? &h->root : NULL), name, howto->name,
2925 (bfd_vma) 0, input_bfd, input_section,
2926 rel->r_vaddr - input_section->vma)))
2935 /* This is a version of bfd_generic_get_relocated_section_contents
2936 which uses sh_relocate_section. */
2939 sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
2940 data, relocatable, symbols)
2942 struct bfd_link_info *link_info;
2943 struct bfd_link_order *link_order;
2945 bfd_boolean relocatable;
2948 asection *input_section = link_order->u.indirect.section;
2949 bfd *input_bfd = input_section->owner;
2950 asection **sections = NULL;
2951 struct internal_reloc *internal_relocs = NULL;
2952 struct internal_syment *internal_syms = NULL;
2954 /* We only need to handle the case of relaxing, or of having a
2955 particular set of section contents, specially. */
/* No cached (possibly relaxed) contents: fall back to the generic
   implementation.  */
2957 || coff_section_data (input_bfd, input_section) == NULL
2958 || coff_section_data (input_bfd, input_section)->contents == NULL)
2959 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
/* Start from the cached section contents rather than re-reading.  */
2964 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2965 (size_t) input_section->size);
2967 if ((input_section->flags & SEC_RELOC) != 0
2968 && input_section->reloc_count > 0)
2970 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2971 bfd_byte *esym, *esymend;
2972 struct internal_syment *isymp;
2976 if (! _bfd_coff_get_external_symbols (input_bfd))
2979 internal_relocs = (_bfd_coff_read_internal_relocs
2980 (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
2981 FALSE, (struct internal_reloc *) NULL));
2982 if (internal_relocs == NULL)
/* Build swapped-in internal symbols and a parallel symbol-index ->
   asection map, both sized by the raw symbol count.  */
2985 amt = obj_raw_syment_count (input_bfd);
2986 amt *= sizeof (struct internal_syment);
2987 internal_syms = (struct internal_syment *) bfd_malloc (amt);
2988 if (internal_syms == NULL)
2991 amt = obj_raw_syment_count (input_bfd);
2992 amt *= sizeof (asection *);
2993 sections = (asection **) bfd_malloc (amt);
2994 if (sections == NULL)
2997 isymp = internal_syms;
2999 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
3000 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
3001 while (esym < esymend)
3003 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
3005 if (isymp->n_scnum != 0)
3006 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum)
3009 if (isymp->n_value == 0)
3010 *secpp = bfd_und_section_ptr;
3012 *secpp = bfd_com_section_ptr;
/* Skip this symbol's auxiliary entries in all three cursors.  */
3015 esym += (isymp->n_numaux + 1) * symesz;
3016 secpp += isymp->n_numaux + 1;
3017 isymp += isymp->n_numaux + 1;
3020 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
3021 input_section, data, internal_relocs,
3022 internal_syms, sections))
/* Success path: release the temporaries (error path below frees
   whatever is still non-NULL).  */
3027 free (internal_syms);
3028 internal_syms = NULL;
3029 free (internal_relocs);
3030 internal_relocs = NULL;
3036 if (internal_relocs != NULL)
3037 free (internal_relocs);
3038 if (internal_syms != NULL)
3039 free (internal_syms);
3040 if (sections != NULL)
3045 /* The target vectors. */
/* Big-endian default target; only emitted when no alternate symbol
   name (TARGET_SHL_SYM) was requested by an including variant file.  */
3047 #ifndef TARGET_SHL_SYM
3048 CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
/* Little-endian target: variant files override the vector symbol and
   target name via TARGET_SHL_SYM / TARGET_SHL_NAME.  */
3051 #ifdef TARGET_SHL_SYM
3052 #define TARGET_SYM TARGET_SHL_SYM
3054 #define TARGET_SYM shlcoff_vec
3057 #ifndef TARGET_SHL_NAME
3058 #define TARGET_SHL_NAME "coff-shl"
/* NOTE(review): the conditional separating these two invocations is
   elided in this excerpt; presumably one is the PE build (extra
   SEC_CODE | SEC_DATA section flags) and one the plain COFF build --
   confirm against the full source.  */
3062 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3063 SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
3065 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3066 0, '_', NULL, COFF_SWAP_TABLE)
3069 #ifndef TARGET_SHL_SYM
/* Forward declarations for the "small" (reduced default alignment)
   target helpers defined below.  */
3070 static const bfd_target * coff_small_object_p PARAMS ((bfd *));
3071 static bfd_boolean coff_small_new_section_hook PARAMS ((bfd *, asection *));
3072 /* Some people want versions of the SH COFF target which do not align
3073 to 16 byte boundaries. We implement that by adding a couple of new
3074 target vectors. These are just like the ones above, but they
3075 change the default section alignment. To generate them in the
3076 assembler, use -small. To use them in the linker, use -b
3077 coff-sh{l}-small and -oformat coff-sh{l}-small.
3079 Yes, this is a horrible hack. A general solution for setting
3080 section alignment in COFF is rather complex. ELF handles this
3083 /* Only recognize the small versions if the target was not defaulted.
3084 Otherwise we won't recognize the non default endianness. */
3086 static const bfd_target *
3087 coff_small_object_p (abfd)
/* Refuse defaulted targets so format guessing never picks the small
   variant; otherwise defer to the normal COFF object recognizer.  */
3090 if (abfd->target_defaulted)
3092 bfd_set_error (bfd_error_wrong_format);
3095 return coff_object_p (abfd);
3098 /* Set the section alignment for the small versions. */
/* Runs the standard COFF new-section hook, then lowers the default
   section alignment from the COFF default to 4 bytes (2^2).  */
3101 coff_small_new_section_hook (abfd, section)
3105 if (! coff_new_section_hook (abfd, section))
3108 /* We must align to at least a four byte boundary, because longword
3109 accesses must be on a four byte boundary. */
/* Only override sections still at the default; explicitly aligned
   sections keep their alignment.  */
3110 if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3111 section->alignment_power = 2;
3116 /* This is copied from bfd_coff_std_swap_table so that we can change
3117 the default section alignment power. */
3119 static const bfd_coff_backend_data bfd_coff_small_swap_table =
/* Byte-swapping routines for symbols, relocs, and headers.  */
3121 coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3122 coff_swap_aux_out, coff_swap_sym_out,
3123 coff_swap_lineno_out, coff_swap_reloc_out,
3124 coff_swap_filehdr_out, coff_swap_aouthdr_out,
3125 coff_swap_scnhdr_out,
/* On-disk record sizes.  */
3126 FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
/* Feature flags; each #ifdef selects TRUE or FALSE (the alternative
   branches are elided in this excerpt).  */
3127 #ifdef COFF_LONG_FILENAMES
3132 #ifdef COFF_LONG_SECTION_NAMES
3138 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3143 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
/* Backend hook functions shared with the standard COFF table.  */
3148 coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3149 coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3150 coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3151 coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3152 coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3153 coff_classify_symbol, coff_compute_section_file_positions,
3154 coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3155 coff_adjust_symndx, coff_link_add_one_symbol,
3156 coff_link_output_has_begun, coff_final_link_postscript,
/* Aliases so BFD_JUMP_TABLE_GENERIC (coff_small) resolves to the
   standard COFF implementations, except for the new-section hook and
   object_p defined above.  */
3160 #define coff_small_close_and_cleanup \
3161 coff_close_and_cleanup
3162 #define coff_small_bfd_free_cached_info \
3163 coff_bfd_free_cached_info
3164 #define coff_small_get_section_contents \
3165 coff_get_section_contents
3166 #define coff_small_get_section_contents_in_window \
3167 coff_get_section_contents_in_window
3169 extern const bfd_target shlcoff_small_vec;
/* Big-endian "small" SH COFF target vector (4-byte default section
   alignment via bfd_coff_small_swap_table / coff_small_* hooks).  */
3171 const bfd_target shcoff_small_vec =
3173 "coff-sh-small", /* name */
3174 bfd_target_coff_flavour,
3175 BFD_ENDIAN_BIG, /* data byte order is big */
3176 BFD_ENDIAN_BIG, /* header byte order is big */
3178 (HAS_RELOC | EXEC_P | /* object flags */
3179 HAS_LINENO | HAS_DEBUG |
3180 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3182 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3183 '_', /* leading symbol underscore */
3184 '/', /* ar_pad_char */
3185 15, /* ar_max_namelen */
/* Big-endian accessors for section data and for headers.  */
3186 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3187 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3188 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3189 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3190 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3191 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
/* coff_small_object_p (not plain coff_object_p) so a defaulted
   target never matches the small variant.  */
3193 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3194 bfd_generic_archive_p, _bfd_dummy_target},
3195 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3197 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3198 _bfd_write_archive_contents, bfd_false},
3200 BFD_JUMP_TABLE_GENERIC (coff_small),
3201 BFD_JUMP_TABLE_COPY (coff),
3202 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3203 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3204 BFD_JUMP_TABLE_SYMBOLS (coff),
3205 BFD_JUMP_TABLE_RELOCS (coff),
3206 BFD_JUMP_TABLE_WRITE (coff),
3207 BFD_JUMP_TABLE_LINK (coff),
3208 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
/* Alternative (opposite-endian) target, and backend data pointing at
   the small swap table declared above.  */
3210 & shlcoff_small_vec,
3212 (PTR) &bfd_coff_small_swap_table
3215 const bfd_target shlcoff_small_vec =
3217 "coff-shl-small", /* name */
3218 bfd_target_coff_flavour,
3219 BFD_ENDIAN_LITTLE, /* data byte order is little */
3220 BFD_ENDIAN_LITTLE, /* header byte order is little endian too*/
3222 (HAS_RELOC | EXEC_P | /* object flags */
3223 HAS_LINENO | HAS_DEBUG |
3224 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3226 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3227 '_', /* leading symbol underscore */
3228 '/', /* ar_pad_char */
3229 15, /* ar_max_namelen */
3230 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3231 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3232 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3233 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3234 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3235 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3237 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3238 bfd_generic_archive_p, _bfd_dummy_target},
3239 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3241 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3242 _bfd_write_archive_contents, bfd_false},
3244 BFD_JUMP_TABLE_GENERIC (coff_small),
3245 BFD_JUMP_TABLE_COPY (coff),
3246 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3247 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3248 BFD_JUMP_TABLE_SYMBOLS (coff),
3249 BFD_JUMP_TABLE_RELOCS (coff),
3250 BFD_JUMP_TABLE_WRITE (coff),
3251 BFD_JUMP_TABLE_LINK (coff),
3252 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3256 (PTR) &bfd_coff_small_swap_table