1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
27 #include "elf32-spu.h"
29 /* We use RELA style relocs. Don't define USE_REL. */
31 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
/* Relocation howtos for the SPU.  HOWTO argument order (per bfd's
   reloc.c): type, rightshift, size, bitsize, pc_relative, bitpos,
   complain_on_overflow, special_function, name, partial_inplace,
   src_mask, dst_mask, pcrel_offset.  */
35 /* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
38 static reloc_howto_type elf_howto_table[] = {
39 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
40 bfd_elf_generic_reloc, "SPU_NONE",
41 FALSE, 0, 0x00000000, FALSE),
42 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
43 bfd_elf_generic_reloc, "SPU_ADDR10",
44 FALSE, 0, 0x00ffc000, FALSE),
45 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
46 bfd_elf_generic_reloc, "SPU_ADDR16",
47 FALSE, 0, 0x007fff80, FALSE),
48 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
49 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
50 FALSE, 0, 0x007fff80, FALSE),
51 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
52 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
53 FALSE, 0, 0x007fff80, FALSE),
54 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
55 bfd_elf_generic_reloc, "SPU_ADDR18",
56 FALSE, 0, 0x01ffff80, FALSE),
57 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "SPU_ADDR32",
59 FALSE, 0, 0xffffffff, FALSE),
60 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "SPU_REL16",
62 FALSE, 0, 0x007fff80, TRUE),
63 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
64 bfd_elf_generic_reloc, "SPU_ADDR7",
65 FALSE, 0, 0x001fc000, FALSE),
/* The two REL9 forms use spu_elf_rel9 because their 9-bit value is
   split across non-contiguous instruction fields (see dst_mask).  */
66 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
67 spu_elf_rel9, "SPU_REL9",
68 FALSE, 0, 0x0180007f, TRUE),
69 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
70 spu_elf_rel9, "SPU_REL9I",
71 FALSE, 0, 0x0000c07f, TRUE),
72 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
73 bfd_elf_generic_reloc, "SPU_ADDR10I",
74 FALSE, 0, 0x00ffc000, FALSE),
75 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
76 bfd_elf_generic_reloc, "SPU_ADDR16I",
77 FALSE, 0, 0x007fff80, FALSE),
78 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
79 bfd_elf_generic_reloc, "SPU_REL32",
80 FALSE, 0, 0xffffffff, TRUE),
81 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "SPU_ADDR16X",
83 FALSE, 0, 0x007fff80, FALSE),
84 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
85 bfd_elf_generic_reloc, "SPU_PPU32",
86 FALSE, 0, 0xffffffff, FALSE),
87 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
88 bfd_elf_generic_reloc, "SPU_PPU64",
/* Sections the SPU backend treats specially: .toe (table of effective
   addresses) is a zero-fill, allocated section.  */
92 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
93 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
/* Map a generic BFD reloc code to the corresponding SPU ELF reloc
   number (an index into elf_howto_table).  */
97 static enum elf_spu_reloc_type
98 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
104 case BFD_RELOC_SPU_IMM10W:
106 case BFD_RELOC_SPU_IMM16W:
108 case BFD_RELOC_SPU_LO16:
109 return R_SPU_ADDR16_LO;
110 case BFD_RELOC_SPU_HI16:
111 return R_SPU_ADDR16_HI;
112 case BFD_RELOC_SPU_IMM18:
114 case BFD_RELOC_SPU_PCREL16:
116 case BFD_RELOC_SPU_IMM7:
118 case BFD_RELOC_SPU_IMM8:
120 case BFD_RELOC_SPU_PCREL9a:
122 case BFD_RELOC_SPU_PCREL9b:
124 case BFD_RELOC_SPU_IMM10:
125 return R_SPU_ADDR10I;
126 case BFD_RELOC_SPU_IMM16:
127 return R_SPU_ADDR16I;
130 case BFD_RELOC_32_PCREL:
132 case BFD_RELOC_SPU_PPU32:
134 case BFD_RELOC_SPU_PPU64:
/* Set CACHE_PTR->howto from the reloc type encoded in DST->r_info.
   The reloc number indexes elf_howto_table directly.  */
140 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
142 Elf_Internal_Rela *dst)
144 enum elf_spu_reloc_type r_type;
146 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
/* Out-of-range reloc numbers indicate a corrupt object.  */
147 BFD_ASSERT (r_type < R_SPU_max);
148 cache_ptr->howto = &elf_howto_table[(int) r_type];
/* Look up the howto entry for a generic BFD reloc CODE.
   Returns NULL (via the R_SPU_NONE check) for unsupported codes.  */
151 static reloc_howto_type *
152 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
153 bfd_reloc_code_real_type code)
155 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
157 if (r_type == R_SPU_NONE)
160 return elf_howto_table + r_type;
/* Look up a howto entry by reloc name (case-insensitive), used by
   tools such as objcopy that name relocs textually.  */
163 static reloc_howto_type *
164 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
169 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
170 if (elf_howto_table[i].name != NULL
171 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
172 return &elf_howto_table[i];
177 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
179 static bfd_reloc_status_type
180 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
181 void *data, asection *input_section,
182 bfd *output_bfd, char **error_message)
184 bfd_size_type octets;
188 /* If this is a relocatable link (output_bfd test tells us), just
189 call the generic function. Any adjustment will be done at final
191 if (output_bfd != NULL)
192 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
193 input_section, output_bfd, error_message);
195 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
196 return bfd_reloc_outofrange;
197 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
199 /* Get symbol value. */
201 if (!bfd_is_com_section (symbol->section))
203 if (symbol->section->output_section)
204 val += symbol->section->output_section->vma;
206 val += reloc_entry->addend;
208 /* Make it pc-relative. */
209 val -= input_section->output_section->vma + input_section->output_offset;
/* Range check: the 9-bit signed field holds [-256, 255].  The
   addition wraps negative values into range for a single compare.  */
212 if (val + 256 >= 512)
213 return bfd_reloc_overflow;
215 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
217 /* Move two high bits of value to REL9I and REL9 position.
218 The mask will take care of selecting the right field. */
219 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
220 insn &= ~reloc_entry->howto->dst_mask;
221 insn |= val & reloc_entry->howto->dst_mask;
222 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
/* Attach SPU-specific per-section data (struct _spu_elf_section_data)
   to SEC, then chain to the generic ELF new-section hook.  */
227 spu_elf_new_section_hook (bfd *abfd, asection *sec)
229 if (!sec->used_by_bfd)
231 struct _spu_elf_section_data *sdata;
/* bfd_zalloc gives zeroed data, so overlay indices start at 0.  */
233 sdata = bfd_zalloc (abfd, sizeof (*sdata));
236 sec->used_by_bfd = sdata;
239 return _bfd_elf_new_section_hook (abfd, sec);
242 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
243 strip --strip-unneeded will not remove them. */
246 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
/* Only defined (non-absolute) symbols with the _EAR_ prefix qualify.  */
248 if (sym->name != NULL
249 && sym->section != bfd_abs_section_ptr
250 && strncmp (sym->name, "_EAR_", 5) == 0)
251 sym->flags |= BSF_KEEP;
254 /* SPU ELF linker hash table. */
256 struct spu_link_hash_table
/* Generic ELF link hash table; must be first so the struct can be
   used wherever an elf_link_hash_table is expected.  */
258 struct elf_link_hash_table elf;
260 /* Shortcuts to overlay sections. */
265 /* Count of stubs in each overlay section. */
266 unsigned int *stub_count;
268 /* The stub section for each overlay section. */
/* Overlay manager entry points resolved from the linked-in (or
   user-supplied) overlay manager.  */
271 struct elf_link_hash_entry *ovly_load;
272 struct elf_link_hash_entry *ovly_return;
273 unsigned long ovly_load_r_symndx;
275 /* Number of overlay buffers. */
276 unsigned int num_buf;
278 /* Total number of overlays. */
279 unsigned int num_overlays;
281 /* Set if we should emit symbols for stubs. */
282 unsigned int emit_stub_syms:1;
284 /* Set if we want stubs on calls out of overlay regions to
285 non-overlay regions. */
286 unsigned int non_overlay_stubs : 1;
/* Set on any error while sizing or building stubs.  */
289 unsigned int stub_err : 1;
291 /* Set if stack size analysis should be done. */
292 unsigned int stack_analysis : 1;
294 /* Set if __stack_* syms will be emitted. */
295 unsigned int emit_stack_syms : 1;
298 /* Hijack the generic got fields for overlay stub accounting. */
302 struct got_entry *next;
/* Retrieve the SPU hash table from a bfd_link_info.  */
308 #define spu_hash_table(p) \
309 ((struct spu_link_hash_table *) ((p)->hash))
311 /* Create a spu ELF linker hash table. */
313 static struct bfd_link_hash_table *
314 spu_elf_link_hash_table_create (bfd *abfd)
316 struct spu_link_hash_table *htab;
318 htab = bfd_malloc (sizeof (*htab));
322 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
323 _bfd_elf_link_hash_newfunc,
324 sizeof (struct elf_link_hash_entry)))
/* Zero only the SPU-specific tail of the struct (everything from
   the ovtab field onward); the generic part was just initialized.  */
330 memset (&htab->ovtab, 0,
331 sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab))
/* The got fields are reused for stub lists, so start them empty
   rather than with the generic refcount/offset init values.  */;
333 htab->elf.init_got_refcount.refcount = 0;
334 htab->elf.init_got_refcount.glist = NULL;
335 htab->elf.init_got_offset.offset = 0;
336 htab->elf.init_got_offset.glist = NULL;
337 return &htab->elf.root;
340 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
341 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
342 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
345 get_sym_h (struct elf_link_hash_entry **hp,
346 Elf_Internal_Sym **symp,
348 Elf_Internal_Sym **locsymsp,
349 unsigned long r_symndx,
352 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
/* Indices at or above sh_info are global symbols.  */
354 if (r_symndx >= symtab_hdr->sh_info)
356 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
357 struct elf_link_hash_entry *h;
359 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
/* Follow indirect and warning links to the real symbol.  */
360 while (h->root.type == bfd_link_hash_indirect
361 || h->root.type == bfd_link_hash_warning)
362 h = (struct elf_link_hash_entry *) h->root.u.i.link;
372 asection *symsec = NULL;
373 if (h->root.type == bfd_link_hash_defined
374 || h->root.type == bfd_link_hash_defweak)
375 symsec = h->root.u.def.section;
381 Elf_Internal_Sym *sym;
/* Local symbol: read (or reuse the cached copy of) the symtab.  */
382 Elf_Internal_Sym *locsyms = *locsymsp;
386 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
389 size_t symcount = symtab_hdr->sh_info;
391 /* If we are reading symbols into the contents, then
392 read the global syms too. This is done to cache
393 syms for later stack analysis. */
394 if ((unsigned char **) locsymsp == &symtab_hdr->contents)
395 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
396 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
403 sym = locsyms + r_symndx;
413 asection *symsec = NULL;
/* Translate the section index unless it is a reserved value.  */
414 if ((sym->st_shndx != SHN_UNDEF
415 && sym->st_shndx < SHN_LORESERVE)
416 || sym->st_shndx > SHN_HIRESERVE)
417 symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
425 /* Create the note section if not already present. This is done early so
426 that the linker maps the sections to the right place in the output. */
429 spu_elf_create_sections (bfd *output_bfd,
430 struct bfd_link_info *info,
435 struct spu_link_hash_table *htab = spu_hash_table (info);
437 /* Stash some options away where we can get at them later. */
438 htab->stack_analysis = stack_analysis;
439 htab->emit_stack_syms = emit_stack_syms;
/* If any input bfd already carries the note section, nothing to do.  */
441 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
442 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
447 /* Make SPU_PTNOTE_SPUNAME section. */
454 ibfd = info->input_bfds;
455 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
456 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
458 || !bfd_set_section_alignment (ibfd, s, 4))
/* ELF note layout: 12-byte header (namesz, descsz, type) followed by
   name and desc, each padded to a 4-byte boundary.  */
461 name_len = strlen (bfd_get_filename (output_bfd)) + 1;
462 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
463 size += (name_len + 3) & -4;
465 if (!bfd_set_section_size (ibfd, s, size))
468 data = bfd_zalloc (ibfd, size);
472 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
473 bfd_put_32 (ibfd, name_len, data + 4);
474 bfd_put_32 (ibfd, 1, data + 8);
475 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
476 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
477 bfd_get_filename (output_bfd), name_len);
484 /* qsort predicate to sort sections by vma. */
487 sort_sections (const void *a, const void *b)
489 const asection *const *s1 = a;
490 const asection *const *s2 = b;
491 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
494 return delta < 0 ? -1 : 1;
/* Equal vmas: fall back to section index so the sort is stable and
   deterministic across runs.  */
496 return (*s1)->index - (*s2)->index;
499 /* Identify overlays in the output bfd, and number them. */
502 spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
504 struct spu_link_hash_table *htab = spu_hash_table (info);
505 asection **alloc_sec;
506 unsigned int i, n, ovl_index, num_buf;
510 if (output_bfd->section_count < 2)
513 alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
514 if (alloc_sec == NULL)
517 /* Pick out all the alloced sections. */
518 for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
519 if ((s->flags & SEC_ALLOC) != 0
520 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
530 /* Sort them by vma. */
531 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections)
532 ;
533 /* Look for overlapping vmas. Any with overlap must be overlays.
534 Count them. Also count the number of overlay regions. */
535 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
536 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
539 if (s->vma < ovl_end)
541 asection *s0 = alloc_sec[i - 1];
/* The first section of an overlapping pair is itself an overlay;
   number it now and start a new overlay buffer.  */
543 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
545 alloc_sec[ovl_index] = s0;
546 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
547 spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
549 alloc_sec[ovl_index] = s;
550 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
551 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
/* Overlays sharing a buffer must be co-located.  */
552 if (s0->vma != s->vma)
554 info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
555 "do not start at the same address.\n"),
559 if (ovl_end < s->vma + s->size)
560 ovl_end = s->vma + s->size;
563 ovl_end = s->vma + s->size;
566 htab->num_overlays = ovl_index;
567 htab->num_buf = num_buf;
568 htab->ovl_sec = alloc_sec;
569 return ovl_index != 0;
572 /* Support two sizes of overlay stubs, a slower more compact stub of two
573 instructions, and a faster stub of four instructions. */
574 #ifndef OVL_STUB_SIZE
575 /* Default to faster. */
576 #define OVL_STUB_SIZE 16
577 /* #define OVL_STUB_SIZE 8 */
/* SPU instruction opcodes used when emitting stub code: branch
   relative and set link, branch relative, nop (execute), nop (load),
   and immediate-load-address.  */
579 #define BRSL 0x33000000
580 #define BR 0x32000000
581 #define NOP 0x40200000
582 #define LNOP 0x00200000
583 #define ILA 0x42000000
585 /* Return true for all relative and absolute branch instructions.
593 brhnz 00100011 0.. */
/* INSN points at the raw instruction bytes; the opcode bit patterns
   being matched are those shown in the comments above.  */
596 is_branch (const unsigned char *insn)
598 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
601 /* Return true for all indirect branch instructions.
609 bihnz 00100101 011 */
612 is_indirect_branch (const unsigned char *insn)
614 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
617 /* Return true for branch hint instructions.
622 is_hint (const unsigned char *insn)
624 return (insn[0] & 0xfc) == 0x10;
627 /* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
630 needs_ovl_stub (const char *sym_name,
632 asection *input_section,
633 struct spu_link_hash_table *htab,
634 bfd_boolean is_branch)
/* No overlays means no stubs, ever.  */
636 if (htab->num_overlays == 0)
640 || sym_sec->output_section == NULL
641 || spu_elf_section_data (sym_sec->output_section) == NULL)
644 /* setjmp always goes via an overlay stub, because then the return
645 and hence the longjmp goes via __ovly_return. That magically
646 makes setjmp/longjmp between overlays work. */
647 if (strncmp (sym_name, "setjmp", 6) == 0
648 && (sym_name[6] == '\0' || sym_name[6] == '@')
649 )
651 /* Usually, symbols in non-overlay sections don't need stubs. */
652 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
653 && !htab->non_overlay_stubs)
656 /* A reference from some other section to a symbol in an overlay
657 section needs a stub. */
658 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
659 != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
662 /* If this insn isn't a branch then we are possibly taking the
663 address of a function and passing it out somehow. */
/* Classification of the instruction a reloc applies to.  */
667 enum _insn_type { non_branch, branch, call };
/* Record that a stub is needed for a reference to H (or, for a local
   symbol, the symbol named by IRELA) from section ISEC of IBFD.
   Bumps htab->stub_count for the relevant overlay.  */
670 count_stub (struct spu_link_hash_table *htab,
673 enum _insn_type insn_type,
674 struct elf_link_hash_entry *h,
675 const Elf_Internal_Rela *irela)
677 unsigned int ovl = 0;
678 struct got_entry *g, **head;
681 /* If this instruction is a branch or call, we need a stub
682 for it. One stub per function per overlay.
683 If it isn't a branch, then we are taking the address of
684 this function so need a stub in the non-overlay area
685 for it. One stub per function. */
686 if (insn_type != non_branch)
687 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
/* Global symbols chain stubs off the hash entry's got list; local
   symbols use a per-bfd array indexed by symbol number.  */
690 head = &h->got.glist;
693 if (elf_local_got_ents (ibfd) == NULL)
695 bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
696 * sizeof (*elf_local_got_ents (ibfd)));
697 elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
698 if (elf_local_got_ents (ibfd) == NULL)
701 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
706 addend = irela->r_addend;
710 struct got_entry *gnext;
/* An ovl==0 (non-overlay area) stub covers every overlay.  */
712 for (g = *head; g != NULL; g = g->next)
713 if (g->addend == addend && g->ovl == 0)
718 /* Need a new non-overlay area stub. Zap other stubs. */
719 for (g = *head; g != NULL; g = gnext)
722 if (g->addend == addend)
724 htab->stub_count[g->ovl] -= 1;
732 for (g = *head; g != NULL; g = g->next)
733 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
/* No existing stub matched: allocate and count a new one.  */
739 g = bfd_malloc (sizeof *g);
744 g->stub_addr = (bfd_vma) -1;
748 htab->stub_count[ovl] += 1;
754 /* Two instruction overlay stubs look like:
757 .word target_ovl_and_address
759 ovl_and_address is a word with the overlay number in the top 14 bits
760 and local store address in the bottom 18 bits.
762 Four instruction overlay stubs look like:
766 ila $79,target_address */
/* Emit the stub previously counted by count_stub for this reloc,
   writing OVL_STUB_SIZE bytes at the end of the stub section and
   recording the stub address in the matching got_entry.  */
770 build_stub (struct spu_link_hash_table *htab,
773 enum _insn_type insn_type,
774 struct elf_link_hash_entry *h,
775 const Elf_Internal_Rela *irela,
780 struct got_entry *g, **head;
782 bfd_vma addend, val, from, to;
785 if (insn_type != non_branch)
786 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
789 head = &h->got.glist;
791 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
795 addend = irela->r_addend;
/* Find the got_entry created in the counting pass.  */
797 for (g = *head; g != NULL; g = g->next)
798 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
803 if (g->ovl == 0 && ovl != 0)
/* Already built (shared with an earlier reloc): nothing to do.  */
806 if (g->stub_addr != (bfd_vma) -1)
809 sec = htab->stub_sec[ovl];
810 dest += dest_sec->output_offset + dest_sec->output_section->vma;
811 from = sec->size + sec->output_offset + sec->output_section->vma;
813 to = (htab->ovly_load->root.u.def.value
814 + htab->ovly_load->root.u.def.section->output_offset
815 + htab->ovly_load->root.u.def.section->output_section->vma);
/* The 16-byte stub's final br has a signed 16-bit word-offset reach;
   everything must also be word aligned.  */
817 if (OVL_STUB_SIZE == 16)
819 if (((dest | to | from) & 3) != 0
820 || val + 0x20000 >= 0x40000)
825 ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
827 if (OVL_STUB_SIZE == 16)
/* ila $78,ovl ; lnop ; ila $79,dest ; br __ovly_load  */
829 bfd_put_32 (sec->owner, ILA + ((ovl << 7) & 0x01ffff80) + 78,
830 sec->contents + sec->size);
831 bfd_put_32 (sec->owner, LNOP,
832 sec->contents + sec->size + 4);
833 bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
834 sec->contents + sec->size + 8);
835 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
836 sec->contents + sec->size + 12);
838 else if (OVL_STUB_SIZE == 8)
/* brsl $75,__ovly_load ; .word (ovl << 14 | dest)  */
840 bfd_put_32 (sec->owner, BRSL + ((val << 5) & 0x007fff80) + 75,
841 sec->contents + sec->size);
843 val = (dest & 0x3ffff) | (ovl << 14);
844 bfd_put_32 (sec->owner, val,
845 sec->contents + sec->size + 4);
849 sec->size += OVL_STUB_SIZE;
851 if (htab->emit_stub_syms)
/* Fabricate a "NNNNNNNN.ovl_call.name[+addend]" symbol naming the
   stub, so it shows up in maps and debuggers.  */
857 len = 8 + sizeof (".ovl_call.") - 1;
859 len += strlen (h->root.root.string);
864 add = (int) irela->r_addend & 0xffffffff;
867 name = bfd_malloc (len);
871 sprintf (name, "%08x.ovl_call.", g->ovl);
873 strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
875 sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
876 dest_sec->id & 0xffffffff,
877 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
879 sprintf (name + len - 9, "+%x", add);
881 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
885 if (h->root.type == bfd_link_hash_new)
887 h->root.type = bfd_link_hash_defined;
888 h->root.u.def.section = sec;
889 h->root.u.def.value = sec->size - OVL_STUB_SIZE;
890 h->size = OVL_STUB_SIZE;
894 h->ref_regular_nonweak = 1;
903 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_ */
907 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
909 /* Symbols starting with _SPUEAR_ need a stub because they may be
910 invoked by the PPU. */
911 if ((h->root.type == bfd_link_hash_defined
912 || h->root.type == bfd_link_hash_defweak)
914 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
916 struct spu_link_hash_table *htab = inf;
/* Treat as address-taken (non_branch) so the stub lands in the
   non-overlay area.  */
918 count_stub (htab, NULL, NULL, non_branch, h, NULL);
/* Companion traversal callback for the build pass: emits the stubs
   that the allocate pass above counted.  */
925 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
927 /* Symbols starting with _SPUEAR_ need a stub because they may be
928 invoked by the PPU. */
929 if ((h->root.type == bfd_link_hash_defined
930 || h->root.type == bfd_link_hash_defweak)
932 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
934 struct spu_link_hash_table *htab = inf;
936 build_stub (htab, NULL, NULL, non_branch, h, NULL,
937 h->root.u.def.value, h->root.u.def.section);
943 /* Size or build stubs. */
/* Walk every reloc in every SPU input section; depending on the
   build pass this either counts needed stubs (via count_stub) or
   emits them (via build_stub).  */
946 process_stubs (bfd *output_bfd,
947 struct bfd_link_info *info,
950 struct spu_link_hash_table *htab = spu_hash_table (info);
953 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
955 extern const bfd_target bfd_elf32_spu_vec;
956 Elf_Internal_Shdr *symtab_hdr;
958 Elf_Internal_Sym *local_syms = NULL;
/* Skip any non-SPU input bfds.  */
961 if (ibfd->xvec != &bfd_elf32_spu_vec)
964 /* We'll need the symbol table in a second. */
965 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
966 if (symtab_hdr->sh_info == 0)
969 /* Arrange to read and keep global syms for later stack analysis. */
971 if (htab->stack_analysis)
972 psyms = &symtab_hdr->contents;
974 /* Walk over each section attached to the input bfd. */
975 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
977 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
979 /* If there aren't any relocs, then there's nothing more to do. */
980 if ((isec->flags & SEC_RELOC) == 0
981 || (isec->flags & SEC_ALLOC) == 0
982 || (isec->flags & SEC_LOAD) == 0
983 || isec->reloc_count == 0)
986 /* If this section is a link-once section that will be
987 discarded, then don't create any stubs. */
988 if (isec->output_section == NULL
989 || isec->output_section->owner != output_bfd)
992 /* Get the relocs. */
993 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
995 if (internal_relocs == NULL)
996 goto error_ret_free_local;
998 /* Now examine each relocation. */
999 irela = internal_relocs;
1000 irelaend = irela + isec->reloc_count;
1001 for (; irela < irelaend; irela++)
1003 enum elf_spu_reloc_type r_type;
1004 unsigned int r_indx;
1006 Elf_Internal_Sym *sym;
1007 struct elf_link_hash_entry *h;
1008 const char *sym_name;
1009 unsigned int sym_type;
1010 enum _insn_type insn_type;
1012 r_type = ELF32_R_TYPE (irela->r_info);
1013 r_indx = ELF32_R_SYM (irela->r_info);
1015 if (r_type >= R_SPU_max)
1017 bfd_set_error (bfd_error_bad_value);
/* Common cleanup for errors after the relocs/syms were read.  */
1018 error_ret_free_internal:
1019 if (elf_section_data (isec)->relocs != internal_relocs)
1020 free (internal_relocs);
1021 error_ret_free_local:
1022 if (local_syms != NULL
1023 && (symtab_hdr->contents
1024 != (unsigned char *) local_syms))
1029 /* Determine the reloc target section. */
1030 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
1031 goto error_ret_free_internal;
1034 || sym_sec->output_section == NULL
1035 || sym_sec->output_section->owner != output_bfd)
1038 /* Ensure no stubs for user supplied overlay manager syms. */
1040 && (strcmp (h->root.root.string, "__ovly_load") == 0
1041 || strcmp (h->root.root.string, "__ovly_return") == 0))
/* Classify the instruction under the reloc: only ADDR16/REL16
   relocs on branch/hint insns are treated as branches/calls.  */
1044 insn_type = non_branch;
1045 if (r_type == R_SPU_REL16
1046 || r_type == R_SPU_ADDR16)
1048 unsigned char insn[4];
1050 if (!bfd_get_section_contents (ibfd, isec, insn,
1051 irela->r_offset, 4))
1052 goto error_ret_free_internal;
1054 if (is_branch (insn) || is_hint (insn))
/* 0x31/0x33 opcodes set the link register, i.e. calls.  */
1057 if ((insn[0] & 0xfd) == 0x31)
1062 /* We are only interested in function symbols. */
1066 sym_name = h->root.root.string;
1070 sym_type = ELF_ST_TYPE (sym->st_info);
1071 sym_name = bfd_elf_sym_name (sym_sec->owner,
1077 if (sym_type != STT_FUNC)
1079 /* It's common for people to write assembly and forget
1080 to give function symbols the right type. Handle
1081 calls to such symbols, but warn so that (hopefully)
1082 people will fix their code. We need the symbol
1083 type to be correct to distinguish function pointer
1084 initialisation from other pointer initialisation. */
1085 if (insn_type == call)
1086 (*_bfd_error_handler) (_("warning: call to non-function"
1087 " symbol %s defined in %B"),
1088 sym_sec->owner, sym_name);
1089 else if (insn_type == non_branch)
1093 if (!needs_ovl_stub (sym_name, sym_sec, isec, htab,
1094 insn_type != non_branch))
/* Lazily allocate the per-overlay stub counters.  */
1097 if (htab->stub_count == NULL)
1100 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1101 htab->stub_count = bfd_zmalloc (amt);
1102 if (htab->stub_count == NULL)
1103 goto error_ret_free_internal;
/* Sizing pass: count.  Build pass: compute the destination
   address and emit the stub.  */
1108 if (!count_stub (htab, ibfd, isec, insn_type, h, irela))
1109 goto error_ret_free_internal;
1116 dest = h->root.u.def.value;
1118 dest = sym->st_value;
1119 dest += irela->r_addend;
1120 if (!build_stub (htab, ibfd, isec, insn_type, h, irela,
1122 goto error_ret_free_internal;
1126 /* We're done with the internal relocs, free them. */
1127 if (elf_section_data (isec)->relocs != internal_relocs)
1128 free (internal_relocs);
/* Release or cache the local syms depending on keep_memory.  */
1131 if (local_syms != NULL
1132 && symtab_hdr->contents != (unsigned char *) local_syms)
1134 if (!info->keep_memory)
1137 symtab_hdr->contents = (unsigned char *) local_syms;
1144 /* Allocate space for overlay call and return stubs. */
1147 spu_elf_size_stubs (bfd *output_bfd,
1148 struct bfd_link_info *info,
1149 void (*place_spu_section) (asection *, asection *,
1151 int non_overlay_stubs)
1153 struct spu_link_hash_table *htab = spu_hash_table (info);
1160 htab->non_overlay_stubs = non_overlay_stubs;
/* Counting pass over all relocs (FALSE = don't emit yet).  */
1161 if (!process_stubs (output_bfd, info, FALSE))
1164 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, htab);
1168 if (htab->stub_count == NULL)
/* One stub section for the non-overlay area (index 0) plus one
   per overlay; PLACE_SPU_SECTION assigns each to an output area.  */
1171 ibfd = info->input_bfds;
1172 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1173 htab->stub_sec = bfd_zmalloc (amt);
1174 if (htab->stub_sec == NULL)
1177 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1178 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1179 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1180 htab->stub_sec[0] = stub;
1182 || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
1184 stub->size = htab->stub_count[0] * OVL_STUB_SIZE;
1185 (*place_spu_section) (stub, NULL, ".text");
1187 for (i = 0; i < htab->num_overlays; ++i)
1189 asection *osec = htab->ovl_sec[i];
1190 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1191 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1192 htab->stub_sec[ovl] = stub;
1194 || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
1196 stub->size = htab->stub_count[ovl] * OVL_STUB_SIZE;
1197 (*place_spu_section) (stub, osec, NULL);
1200 /* htab->ovtab consists of two arrays.
1210 . } _ovly_buf_table[]; */
1213 flags = (SEC_ALLOC | SEC_LOAD
1214 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1215 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1216 if (htab->ovtab == NULL
1217 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
/* 16 bytes per overlay entry, a 16-byte header slot, and one word
   per overlay buffer.  */
1220 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1221 (*place_spu_section) (htab->ovtab, NULL, ".data");
1223 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1224 if (htab->toe == NULL
1225 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1227 htab->toe->size = 16;
1228 (*place_spu_section) (htab->toe, NULL, ".toe");
1233 /* Functions to handle embedded spu_ovl.o object. */
/* bfd_openr_iovec "open" callback: the stream is the in-memory
   overlay manager image, so there is nothing to open.  */
1236 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
/* bfd_openr_iovec "pread" callback: copy COUNT bytes at OFFSET out
   of the in-memory image, clamping to its size.  */
1242 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1248 struct _ovl_stream *os;
1252 os = (struct _ovl_stream *) stream;
1253 max = (const char *) os->end - (const char *) os->start;
1255 if ((ufile_ptr) offset >= max)
1259 if (count > max - offset)
1260 count = max - offset;
1262 memcpy (buf, (const char *) os->start + offset, count);
/* Open the built-in overlay manager image as a bfd via the iovec
   callbacks above.  Returns FALSE on failure.  */
1267 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1269 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1276 return *ovl_bfd != NULL;
1279 /* Define an STT_OBJECT symbol. */
1281 static struct elf_link_hash_entry *
1282 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1284 struct elf_link_hash_entry *h;
1286 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
/* Define the symbol in .ovtab unless some input file already
   defined it, which is an error reported below.  */
1290 if (h->root.type != bfd_link_hash_defined
1293 h->root.type = bfd_link_hash_defined;
1294 h->root.u.def.section = htab->ovtab;
1295 h->type = STT_OBJECT;
1298 h->ref_regular_nonweak = 1;
1303 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1304 h->root.u.def.section->owner,
1305 h->root.root.string);
1306 bfd_set_error (bfd_error_bad_value);
1313 /* Fill in all stubs and the overlay tables. */
1316 spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms)
1318 struct spu_link_hash_table *htab = spu_hash_table (info);
1319 struct elf_link_hash_entry *h;
1325 htab->emit_stub_syms = emit_syms;
1326 if (htab->stub_count == NULL)
/* Allocate contents for every stub section; stash the sized length
   in rawsize and reset size so build_stub can append from zero.  */
1329 for (i = 0; i <= htab->num_overlays; i++)
1330 if (htab->stub_sec[i]->size != 0)
1332 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1333 htab->stub_sec[i]->size);
1334 if (htab->stub_sec[i]->contents == NULL)
1336 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1337 htab->stub_sec[i]->size = 0;
/* The overlay manager entry points must exist and must not live in
   an overlay themselves.  */
1340 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1341 htab->ovly_load = h;
1342 BFD_ASSERT (h != NULL
1343 && (h->root.type == bfd_link_hash_defined
1344 || h->root.type == bfd_link_hash_defweak)
1347 s = h->root.u.def.section->output_section;
1348 if (spu_elf_section_data (s)->u.o.ovl_index)
1350 (*_bfd_error_handler) (_("%s in overlay section"),
1351 h->root.u.def.section->owner);
1352 bfd_set_error (bfd_error_bad_value);
1356 h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
1357 htab->ovly_return = h;
1359 /* Write out all the stubs. */
1360 obfd = htab->ovtab->output_section->owner;
1361 process_stubs (obfd, info, TRUE);
1363 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, htab);
/* Sanity check: the build pass must emit exactly the bytes that the
   sizing pass reserved.  */
1367 for (i = 0; i <= htab->num_overlays; i++)
1369 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1371 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1372 bfd_set_error (bfd_error_bad_value);
1375 htab->stub_sec[i]->rawsize = 0;
1380 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1381 bfd_set_error (bfd_error_bad_value);
1385 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1386 if (htab->ovtab->contents == NULL)
1389 /* Write out _ovly_table. */
1390 p = htab->ovtab->contents;
1391 /* set low bit of .size to mark non-overlay area as present. */
1393 for (s = obfd->sections; s != NULL; s = s->next)
1395 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
/* Each _ovly_table entry is 16 bytes: vma, size, file_off, buf.  */
1399 unsigned long off = ovl_index * 16;
1400 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
1402 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1403 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1404 /* file_off written later in spu_elf_modify_program_headers. */
1405 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
/* Publish table layout via well-known symbols for the runtime.  */
1409 h = define_ovtab_symbol (htab, "_ovly_table");
1412 h->root.u.def.value = 16;
1413 h->size = htab->num_overlays * 16;
1415 h = define_ovtab_symbol (htab, "_ovly_table_end");
1418 h->root.u.def.value = htab->num_overlays * 16 + 16;
1421 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1424 h->root.u.def.value = htab->num_overlays * 16 + 16;
1425 h->size = htab->num_buf * 4;
1427 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1430 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1433 h = define_ovtab_symbol (htab, "_EAR_");
1436 h->root.u.def.section = htab->toe;
1437 h->root.u.def.value = 0;
1443 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1444 Search for stack adjusting insns, and return the sp delta. */
/* NOTE(review): this extract omits intermediate lines; declarations of
   reg[], rt, ra, imm and unrecog are not visible here.  The decoder
   symbolically tracks register values so it can follow sp updates.  */
1447 find_function_stack_adjust (asection *sec, bfd_vma offset)
/* All tracked register values start out as zero.  */
1452 memset (reg, 0, sizeof (reg));
/* Scan forward one 4-byte insn at a time, giving up after 32
   unrecognised insns or at end of section.  */
1453 for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
1455 unsigned char buf[4];
1459 /* Assume no relocs on stack adjusing insns. */
1460 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
1463 if (buf[0] == 0x24 /* stqd */)
/* ra is decoded from bits spanning buf[2]/buf[3].  */
1467 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
1468 /* Partly decoded immediate field. */
1469 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
1471 if (buf[0] == 0x1c /* ai */)
/* Sign-extend the 10-bit immediate.  */
1474 imm = (imm ^ 0x200) - 0x200;
1475 reg[rt] = reg[ra] + imm;
/* Register 1 is the SPU stack pointer; an "ai sp,sp,imm" is the
   canonical stack adjustment.  */
1477 if (rt == 1 /* sp */)
1484 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
1486 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
1488 reg[rt] = reg[ra] + reg[rb];
1492 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1494 if (buf[0] >= 0x42 /* ila */)
/* ila carries an 18-bit immediate; pick up the extra bit.  */
1495 imm |= (buf[0] & 1) << 17;
1500 if (buf[0] == 0x40 /* il */)
1502 if ((buf[1] & 0x80) == 0)
/* Sign-extend the 16-bit immediate for il.  */
1504 imm = (imm ^ 0x8000) - 0x8000;
1506 else if ((buf[1] & 0x80) == 0 /* ilhu */)
1512 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
/* iohl ors the low halfword into an existing value.  */
1514 reg[rt] |= imm & 0xffff;
1517 else if (buf[0] == 0x04 /* ori */)
1520 imm = (imm ^ 0x200) - 0x200;
1521 reg[rt] = reg[ra] | imm;
1524 else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
1525 || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
1527 /* Used in pic reg load. Say rt is trashed. */
1531 else if (is_branch (buf) || is_indirect_branch (buf))
1532 /* If we hit a branch then we must be out of the prologue. */
1541 /* qsort predicate to sort symbols by section and value. */
/* File-scope state for the comparator: qsort passes no user context,
   so the symbol array base and per-symbol section array are stashed
   here before the qsort call.  */
1543 static Elf_Internal_Sym *sort_syms_syms;
1544 static asection **sort_syms_psecs;
1547 sort_syms (const void *a, const void *b)
1549 Elf_Internal_Sym *const *s1 = a;
1550 Elf_Internal_Sym *const *s2 = b;
1551 asection *sec1,*sec2;
1552 bfd_signed_vma delta;
/* Look up each symbol's section via its index in the symbol array.  */
1554 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1555 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
/* Primary key: section index.  */
1558 return sec1->index - sec2->index;
/* Secondary key: symbol value within the section.  */
1560 delta = (*s1)->st_value - (*s2)->st_value;
1562 return delta < 0 ? -1 : 1;
/* Then prefer the larger-sized symbol first (note reversed operands).  */
1564 delta = (*s2)->st_size - (*s1)->st_size;
1566 return delta < 0 ? -1 : 1;
/* Finally fall back to address order for a deterministic result.  */
1568 return *s1 < *s2 ? -1 : 1;
/* NOTE(review): struct headers for 'struct call_info' are among the lines
   missing from this extract; the two members below belong to it.  */
/* One edge in the call graph: the callee and the next call record.  */
1573 struct function_info *fun;
1574 struct call_info *next;
1578 struct function_info
1580 /* List of functions called. Also branches to hot/cold part of
1582 struct call_info *call_list;
1583 /* For hot/cold part of function, point to owner. */
1584 struct function_info *start;
1585 /* Symbol at start of function. */
/* Either a local symbol or a hash-table entry, depending on 'global'.  */
1587 Elf_Internal_Sym *sym;
1588 struct elf_link_hash_entry *h;
1590 /* Function section. */
1592 /* Address range of (this part of) function. */
1596 /* Set if global symbol. */
1597 unsigned int global : 1;
1598 /* Set if known to be start of function (as distinct from a hunk
1599 in hot/cold section. */
1600 unsigned int is_func : 1;
1601 /* Flags used during call tree traversal. */
1602 unsigned int visit1 : 1;
1603 unsigned int non_root : 1;
1604 unsigned int visit2 : 1;
1605 unsigned int marking : 1;
1606 unsigned int visit3 : 1;
/* Per-section function table, attached to section data.  */
1609 struct spu_elf_stack_info
1613 /* Variable size array describing functions, one per contiguous
1614 address range belonging to a function. */
/* [1] is the pre-C99 flexible-array idiom; allocation over-sizes it.  */
1615 struct function_info fun[1];
1618 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1619 entries for section SEC. */
1621 static struct spu_elf_stack_info *
1622 alloc_stack_info (asection *sec, int max_fun)
1624 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
/* Size of the header plus MAX_FUN entries; subtract one because the
   struct already contains fun[1].  */
1627 amt = sizeof (struct spu_elf_stack_info);
1628 amt += (max_fun - 1) * sizeof (struct function_info);
/* Zeroed allocation, so all function_info flags start clear.  */
1629 sec_data->u.i.stack_info = bfd_zmalloc (amt);
1630 if (sec_data->u.i.stack_info != NULL)
1631 sec_data->u.i.stack_info->max_fun = max_fun;
/* Returns NULL on allocation failure.  */
1632 return sec_data->u.i.stack_info;
1635 /* Add a new struct function_info describing a (part of a) function
1636 starting at SYM_H. Keep the array sorted by address. */
/* NOTE(review): parameters between SEC and IS_FUNC (sym_h, global) are on
   lines missing from this extract.  SYM_H is either an Elf_Internal_Sym*
   or an elf_link_hash_entry*, selected by the 'global' flag.  */
1638 static struct function_info *
1639 maybe_insert_function (asection *sec,
1642 bfd_boolean is_func)
1644 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1645 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
/* Lazily create the per-section table with room for 20 entries.  */
1651 sinfo = alloc_stack_info (sec, 20);
1658 Elf_Internal_Sym *sym = sym_h;
1659 off = sym->st_value;
1660 size = sym->st_size;
1664 struct elf_link_hash_entry *h = sym_h;
1665 off = h->root.u.def.value;
/* Find the insertion point: scan backwards for the last entry whose
   start address is <= OFF (table is kept sorted by lo).  */
1669 for (i = sinfo->num_fun; --i >= 0; )
1670 if (sinfo->fun[i].lo <= off)
1675 /* Don't add another entry for an alias, but do update some
1677 if (sinfo->fun[i].lo == off)
1679 /* Prefer globals over local syms. */
1680 if (global && !sinfo->fun[i].global)
1682 sinfo->fun[i].global = TRUE;
1683 sinfo->fun[i].u.h = sym_h;
1686 sinfo->fun[i].is_func = TRUE;
1687 return &sinfo->fun[i];
1689 /* Ignore a zero-size symbol inside an existing function. */
1690 else if (sinfo->fun[i].hi > off && size == 0)
1691 return &sinfo->fun[i];
/* Make room at index i+1, shifting later entries up.  */
1694 if (++i < sinfo->num_fun)
1695 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1696 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1697 else if (i >= sinfo->max_fun)
/* Table full: grow capacity by 20 + 50% and realloc.  */
1699 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1700 bfd_size_type old = amt;
1702 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
1703 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1704 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1705 sinfo = bfd_realloc (sinfo, amt);
/* Zero the newly added tail so flags in fresh entries are clear.  */
1708 memset ((char *) sinfo + old, 0, amt - old);
1709 sec_data->u.i.stack_info = sinfo;
/* Fill in the new entry.  */
1711 sinfo->fun[i].is_func = is_func;
1712 sinfo->fun[i].global = global;
1713 sinfo->fun[i].sec = sec;
1715 sinfo->fun[i].u.h = sym_h;
1717 sinfo->fun[i].u.sym = sym_h;
1718 sinfo->fun[i].lo = off;
1719 sinfo->fun[i].hi = off + size;
/* Local stack usage, negated so a downward sp adjustment is positive.  */
1720 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1721 sinfo->num_fun += 1;
1722 return &sinfo->fun[i];
1725 /* Return the name of FUN. */
1728 func_name (struct function_info *fun)
1732 Elf_Internal_Shdr *symtab_hdr;
/* For a hot/cold fragment, report the name of the owning function.  */
1734 while (fun->start != NULL)
1738 return fun->u.h->root.root.string;
/* Anonymous local symbol: synthesise "section+offset".  */
1741 if (fun->u.sym->st_name == 0)
1743 size_t len = strlen (sec->name);
/* len + 10 leaves room for '+', up to 8 hex digits, and the NUL.  */
1744 char *name = bfd_malloc (len + 10);
1747 sprintf (name, "%s+%lx", sec->name,
1748 (unsigned long) fun->u.sym->st_value & 0xffffffff);
1752 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1753 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1756 /* Read the instruction at OFF in SEC. Return true iff the instruction
1757 is a nop, lnop, or stop 0 (all zero insn). */
1760 is_nop (asection *sec, bfd_vma off)
1762 unsigned char insn[4];
/* Out-of-range or unreadable contents cannot be a nop.  */
1764 if (off + 4 > sec->size
1765 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
/* Matches both nop (0x40200000 pattern) and lnop (0x00200000).  */
1767 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
/* All-zero insn is "stop 0", also treated as padding.  */
1769 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1774 /* Extend the range of FUN to cover nop padding up to LIMIT.
1775 Return TRUE iff some instruction other than a NOP was found. */
1778 insns_at_end (struct function_info *fun, bfd_vma limit)
/* Round the current end of FUN up to a 4-byte insn boundary.  */
1780 bfd_vma off = (fun->hi + 3) & -4;
/* Skip over any nop padding before LIMIT.  */
1782 while (off < limit && is_nop (fun->sec, off))
1793 /* Check and fix overlapping function ranges. Return TRUE iff there
1794 are gaps in the current info we have about functions in SEC. */
1797 check_function_ranges (asection *sec, struct bfd_link_info *info)
1799 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1800 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1802 bfd_boolean gaps = FALSE;
/* Walk adjacent pairs; entries are sorted by lo address.  */
1807 for (i = 1; i < sinfo->num_fun; i++)
1808 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1810 /* Fix overlapping symbols. */
1811 const char *f1 = func_name (&sinfo->fun[i - 1]);
1812 const char *f2 = func_name (&sinfo->fun[i]);
1814 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
/* Truncate the earlier function at the start of the later one.  */
1815 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
/* Non-nop insns between functions mean unaccounted-for code.  */
1817 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
1820 if (sinfo->num_fun == 0)
/* Code before the first known function is also a gap.  */
1824 if (sinfo->fun[0].lo != 0)
1826 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
1828 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
1830 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
1831 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
1833 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
1839 /* Search current function info for a function that contains address
1840 OFFSET in section SEC. */
1842 static struct function_info *
1843 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1845 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1846 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
/* Binary search over the sorted [lo, hi) ranges.  */
1850 hi = sinfo->num_fun;
1853 mid = (lo + hi) / 2;
1854 if (offset < sinfo->fun[mid].lo)
1856 else if (offset >= sinfo->fun[mid].hi)
1859 return &sinfo->fun[mid];
/* Not found: report and (presumably) fail — error path lines are
   missing from this extract.  */
1861 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1866 /* Add CALLEE to CALLER call list if not already present. */
1869 insert_callee (struct function_info *caller, struct call_info *callee)
1871 struct call_info *p;
/* Linear scan for an existing edge to the same callee function.  */
1872 for (p = caller->call_list; p != NULL; p = p->next)
1873 if (p->fun == callee->fun)
1875 /* Tail calls use less stack than normal calls. Retain entry
1876 for normal call over one for tail call. */
1877 if (p->is_tail > callee->is_tail)
1878 p->is_tail = callee->is_tail;
/* New edge: push it on the front of the caller's list.  */
1881 callee->next = caller->call_list;
1882 caller->call_list = callee;
1886 /* Rummage through the relocs for SEC, looking for function calls.
1887 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1888 mark destination symbols on calls as being functions. Also
1889 look at branches, which may be tail calls or go to hot/cold
1890 section part of same function. */
/* NOTE(review): this extract omits intermediate lines, including the
   call_tree parameter declaration and several braces/early returns.  */
1893 mark_functions_via_relocs (asection *sec,
1894 struct bfd_link_info *info,
1897 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1898 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1899 Elf_Internal_Sym *syms;
/* Static so the "call to non-code section" warning is only emitted
   once across all invocations.  */
1901 static bfd_boolean warned;
1903 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
1905 if (internal_relocs == NULL)
1908 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1909 psyms = &symtab_hdr->contents;
1910 syms = *(Elf_Internal_Sym **) psyms;
1911 irela = internal_relocs;
1912 irelaend = irela + sec->reloc_count;
1913 for (; irela < irelaend; irela++)
1915 enum elf_spu_reloc_type r_type;
1916 unsigned int r_indx;
1918 Elf_Internal_Sym *sym;
1919 struct elf_link_hash_entry *h;
1921 unsigned char insn[4];
1922 bfd_boolean is_call;
1923 struct function_info *caller;
1924 struct call_info *callee;
/* Only 16-bit branch-style relocs can be calls/branches.  */
1926 r_type = ELF32_R_TYPE (irela->r_info);
1927 if (r_type != R_SPU_REL16
1928 && r_type != R_SPU_ADDR16)
1931 r_indx = ELF32_R_SYM (irela->r_info);
1932 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
/* Skip targets outside this output image.  */
1936 || sym_sec->output_section == NULL
1937 || sym_sec->output_section->owner != sec->output_section->owner)
/* Read the insn at the reloc site to see whether it is a branch.  */
1940 if (!bfd_get_section_contents (sec->owner, sec, insn,
1941 irela->r_offset, 4))
1943 if (!is_branch (insn))
1946 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1947 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1951 if (!call_tree || !warned)
1952 info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
1953 " %B(%A), stack analysis incomplete\n"),
1954 sec->owner, sec, irela->r_offset,
1955 sym_sec->owner, sym_sec);
/* brsl/brasl opcodes; other branches are tail calls or intra-function.  */
1959 is_call = (insn[0] & 0xfd) == 0x31;
1962 val = h->root.u.def.value;
1964 val = sym->st_value;
1965 val += irela->r_addend;
1969 struct function_info *fun;
/* A non-zero addend means the branch target is not the symbol
   itself; fabricate a local symbol at the target address.  */
1971 if (irela->r_addend != 0)
1973 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
1976 fake->st_value = val;
1978 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
1982 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
1984 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
1987 if (irela->r_addend != 0
1988 && fun->u.sym != sym)
/* call_tree pass: record the edge from caller to callee.  */
1993 caller = find_function (sec, irela->r_offset, info);
1996 callee = bfd_malloc (sizeof *callee);
2000 callee->fun = find_function (sym_sec, val, info);
2001 if (callee->fun == NULL)
2003 callee->is_tail = !is_call;
2004 if (!insert_callee (caller, callee))
2007 && !callee->fun->is_func
2008 && callee->fun->stack == 0)
2010 /* This is either a tail call or a branch from one part of
2011 the function to another, ie. hot/cold section. If the
2012 destination has been called by some other function then
2013 it is a separate function. We also assume that functions
2014 are not split across input files. */
2015 if (sec->owner != sym_sec->owner)
2017 callee->fun->start = NULL;
2018 callee->fun->is_func = TRUE;
2020 else if (callee->fun->start == NULL)
2021 callee->fun->start = caller;
/* Both ends already have owners: if they resolve to different
   root functions, the callee must be a real separate function.  */
2024 struct function_info *callee_start;
2025 struct function_info *caller_start;
2026 callee_start = callee->fun;
2027 while (callee_start->start)
2028 callee_start = callee_start->start;
2029 caller_start = caller;
2030 while (caller_start->start)
2031 caller_start = caller_start->start;
2032 if (caller_start != callee_start)
2034 callee->fun->start = NULL;
2035 callee->fun->is_func = TRUE;
2044 /* Handle something like .init or .fini, which has a piece of a function.
2045 These sections are pasted together to form a single function. */
2048 pasted_function (asection *sec, struct bfd_link_info *info)
2050 struct bfd_link_order *l;
2051 struct _spu_elf_section_data *sec_data;
2052 struct spu_elf_stack_info *sinfo;
2053 Elf_Internal_Sym *fake;
2054 struct function_info *fun, *fun_start;
/* Fabricate a local symbol spanning the whole section.  */
2056 fake = bfd_zmalloc (sizeof (*fake));
2060 fake->st_size = sec->size;
2062 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2063 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2067 /* Find a function immediately preceding this section. */
/* Walk the output section's link order; fun_start tracks the last
   function seen in sections placed before SEC.  */
2069 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2071 if (l->u.indirect.section == sec)
2073 if (fun_start != NULL)
/* Chain this piece onto the preceding function fragment.  */
2074 fun->start = fun_start;
2077 if (l->type == bfd_indirect_link_order
2078 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2079 && (sinfo = sec_data->u.i.stack_info) != NULL
2080 && sinfo->num_fun != 0)
2081 fun_start = &sinfo->fun[sinfo->num_fun - 1];
/* SEC was not found in its output section's link order: error.  */
2084 info->callbacks->einfo (_("%A link_order not found\n"), sec);
2088 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2089 overlay stub sections. */
2092 interesting_section (asection *s, bfd *obfd)
/* Must be a loaded code section going into OBFD, and NOT SEC_IN_MEMORY
   (stub sections carry that flag).  */
2094 return (s->output_section != NULL
2095 && s->output_section->owner == obfd
2096 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2097 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2101 /* Map address ranges in code sections to functions. */
/* NOTE(review): intermediate lines are missing from this extract,
   including the initial bfd_idx count loop body and several braces.  */
2104 discover_functions (bfd *output_bfd, struct bfd_link_info *info)
2108 Elf_Internal_Sym ***psym_arr;
2109 asection ***sec_arr;
2110 bfd_boolean gaps = FALSE;
/* First count input bfds to size the per-bfd arrays.  */
2113 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2116 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2117 if (psym_arr == NULL)
2119 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2120 if (sec_arr == NULL)
/* Pass 1: per input bfd, collect and sort candidate function symbols.  */
2124 for (ibfd = info->input_bfds, bfd_idx = 0;
2126 ibfd = ibfd->link_next, bfd_idx++)
2128 extern const bfd_target bfd_elf32_spu_vec;
2129 Elf_Internal_Shdr *symtab_hdr;
2132 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2133 asection **psecs, **p;
/* Only SPU-target input files participate.  */
2135 if (ibfd->xvec != &bfd_elf32_spu_vec)
2138 /* Read all the symbols. */
2139 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2140 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2144 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2147 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
/* Cache the symbols on the header so they are read only once.  */
2149 symtab_hdr->contents = (void *) syms;
2154 /* Select defined function symbols that are going to be output. */
2155 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2158 psym_arr[bfd_idx] = psyms;
2159 psecs = bfd_malloc (symcount * sizeof (*psecs));
2162 sec_arr[bfd_idx] = psecs;
2163 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2164 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2165 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2169 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2170 if (s != NULL && interesting_section (s, output_bfd))
2173 symcount = psy - psyms;
2176 /* Sort them by section and offset within section. */
2177 sort_syms_syms = syms;
2178 sort_syms_psecs = psecs;
2179 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2181 /* Now inspect the function symbols. */
2182 for (psy = psyms; psy < psyms + symcount; )
2184 asection *s = psecs[*psy - syms];
2185 Elf_Internal_Sym **psy2;
/* Count symbols belonging to the same section so the per-section
   table can be sized exactly.  */
2187 for (psy2 = psy; ++psy2 < psyms + symcount; )
2188 if (psecs[*psy2 - syms] != s)
2191 if (!alloc_stack_info (s, psy2 - psy))
2196 /* First install info about properly typed and sized functions.
2197 In an ideal world this will cover all code sections, except
2198 when partitioning functions into hot and cold sections,
2199 and the horrible pasted together .init and .fini functions. */
2200 for (psy = psyms; psy < psyms + symcount; ++psy)
2203 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2205 asection *s = psecs[sy - syms];
2206 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2211 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2212 if (interesting_section (sec, output_bfd))
2213 gaps |= check_function_ranges (sec, info)
2218 /* See if we can discover more function symbols by looking at
/* Pass 2: use branch relocs to find functions lacking symbols.  */
2220 for (ibfd = info->input_bfds, bfd_idx = 0;
2222 ibfd = ibfd->link_next, bfd_idx++)
2226 if (psym_arr[bfd_idx] == NULL)
2229 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2230 if (interesting_section (sec, output_bfd)
2231 && sec->reloc_count != 0)
2233 if (!mark_functions_via_relocs (sec, info, FALSE))
/* Pass 3: install globals and fill any remaining gaps.  */
2238 for (ibfd = info->input_bfds, bfd_idx = 0;
2240 ibfd = ibfd->link_next, bfd_idx++)
2242 Elf_Internal_Shdr *symtab_hdr;
2244 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2247 if ((psyms = psym_arr[bfd_idx]) == NULL)
2250 psecs = sec_arr[bfd_idx];
2252 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2253 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2256 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2257 if (interesting_section (sec, output_bfd))
2258 gaps |= check_function_ranges (sec, info);
2262 /* Finally, install all globals. */
2263 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2267 s = psecs[sy - syms];
2269 /* Global syms might be improperly typed functions. */
2270 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2271 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2273 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2278 /* Some of the symbols we've installed as marking the
2279 beginning of functions may have a size of zero. Extend
2280 the range of such functions to the beginning of the
2281 next symbol of interest. */
2282 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2283 if (interesting_section (sec, output_bfd))
2285 struct _spu_elf_section_data *sec_data;
2286 struct spu_elf_stack_info *sinfo;
2288 sec_data = spu_elf_section_data (sec);
2289 sinfo = sec_data->u.i.stack_info;
2293 bfd_vma hi = sec->size;
/* Walk backwards so each function's hi becomes the next lo.  */
2295 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2297 sinfo->fun[fun_idx].hi = hi;
2298 hi = sinfo->fun[fun_idx].lo;
2301 /* No symbols in this section. Must be .init or .fini
2302 or something similar. */
2303 else if (!pasted_function (sec, info))
/* Release the per-bfd scratch arrays.  */
2309 for (ibfd = info->input_bfds, bfd_idx = 0;
2311 ibfd = ibfd->link_next, bfd_idx++)
2313 if (psym_arr[bfd_idx] == NULL)
2316 free (psym_arr[bfd_idx]);
2317 free (sec_arr[bfd_idx]);
2326 /* Mark nodes in the call graph that are called by some other node. */
2329 mark_non_root (struct function_info *fun)
2331 struct call_info *call;
/* Depth-first walk; visit1 guards against revisiting (and thus
   against infinite recursion on cycles).  */
2334 for (call = fun->call_list; call; call = call->next)
2336 call->fun->non_root = TRUE;
2337 if (!call->fun->visit1)
2338 mark_non_root (call->fun);
2342 /* Remove cycles from the call graph. */
2345 call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
2347 struct call_info **callp, *call;
/* 'marking' flags nodes on the current DFS path; an edge back to a
   marked node closes a cycle.  */
2350 fun->marking = TRUE;
2352 callp = &fun->call_list;
2353 while ((call = *callp) != NULL)
2355 if (!call->fun->visit2)
2356 call_graph_traverse (call->fun, info);
2357 else if (call->fun->marking)
2359 const char *f1 = func_name (fun);
2360 const char *f2 = func_name (call->fun);
2362 info->callbacks->info (_("Stack analysis will ignore the call "
/* Drop the back edge from the list to break the cycle.  */
2365 *callp = call->next;
2368 callp = &call->next;
2370 fun->marking = FALSE;
2373 /* Populate call_list for each function. */
2376 build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
/* Phase 1: scan relocs in every interesting SPU section to record
   call edges, then fold hot/cold fragment edges into their owners.  */
2380 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2382 extern const bfd_target bfd_elf32_spu_vec;
2385 if (ibfd->xvec != &bfd_elf32_spu_vec)
2388 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2390 if (!interesting_section (sec, output_bfd)
2391 || sec->reloc_count == 0)
2394 if (!mark_functions_via_relocs (sec, info, TRUE))
2398 /* Transfer call info from hot/cold section part of function
2400 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2402 struct _spu_elf_section_data *sec_data;
2403 struct spu_elf_stack_info *sinfo;
2405 if ((sec_data = spu_elf_section_data (sec)) != NULL
2406 && (sinfo = sec_data->u.i.stack_info) != NULL)
2409 for (i = 0; i < sinfo->num_fun; ++i)
2411 struct function_info *start = sinfo->fun[i].start;
2415 struct call_info *call;
/* Resolve to the root fragment of the function.  */
2417 while (start->start != NULL)
2418 start = start->start;
2419 call = sinfo->fun[i].call_list;
2420 while (call != NULL)
2422 struct call_info *call_next = call->next;
2423 if (!insert_callee (start, call))
/* Fragment's edges now live on the owner; clear them here.  */
2427 sinfo->fun[i].call_list = NULL;
2428 sinfo->fun[i].non_root = TRUE;
2435 /* Find the call graph root(s). */
/* Phase 2: mark every called node so the unmarked ones are roots.  */
2436 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2438 extern const bfd_target bfd_elf32_spu_vec;
2441 if (ibfd->xvec != &bfd_elf32_spu_vec)
2444 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2446 struct _spu_elf_section_data *sec_data;
2447 struct spu_elf_stack_info *sinfo;
2449 if ((sec_data = spu_elf_section_data (sec)) != NULL
2450 && (sinfo = sec_data->u.i.stack_info) != NULL)
2453 for (i = 0; i < sinfo->num_fun; ++i)
2454 if (!sinfo->fun[i].visit1)
2455 mark_non_root (&sinfo->fun[i]);
2460 /* Remove cycles from the call graph. We start from the root node(s)
2461 so that we break cycles in a reasonable place. */
/* Phase 3: DFS from each root, dropping back edges.  */
2462 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2464 extern const bfd_target bfd_elf32_spu_vec;
2467 if (ibfd->xvec != &bfd_elf32_spu_vec)
2470 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2472 struct _spu_elf_section_data *sec_data;
2473 struct spu_elf_stack_info *sinfo;
2475 if ((sec_data = spu_elf_section_data (sec)) != NULL
2476 && (sinfo = sec_data->u.i.stack_info) != NULL)
2479 for (i = 0; i < sinfo->num_fun; ++i)
2480 if (!sinfo->fun[i].non_root)
2481 call_graph_traverse (&sinfo->fun[i], info);
2489 /* Descend the call graph for FUN, accumulating total stack required. */
2492 sum_stack (struct function_info *fun,
2493 struct bfd_link_info *info,
2494 int emit_stack_syms)
2496 struct call_info *call;
2497 struct function_info *max = NULL;
/* Start from FUN's own local stack use; callee totals may raise it.  */
2498 bfd_vma max_stack = fun->stack;
2505 for (call = fun->call_list; call; call = call->next)
2507 stack = sum_stack (call->fun, info, emit_stack_syms);
2508 /* Include caller stack for normal calls, don't do so for
2509 tail calls. fun->stack here is local stack usage for
2512 stack += fun->stack;
/* Track which callee produced the deepest path.  */
2513 if (max_stack < stack)
2520 f1 = func_name (fun);
2521 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
2522 f1, (bfd_vma) fun->stack, max_stack);
2526 info->callbacks->minfo (_(" calls:\n"));
2527 for (call = fun->call_list; call; call = call->next)
2529 const char *f2 = func_name (call->fun);
/* '*' marks the max-stack callee, 't' marks a tail call.  */
2530 const char *ann1 = call->fun == max ? "*" : " ";
2531 const char *ann2 = call->is_tail ? "t" : " ";
2533 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
2537 /* Now fun->stack holds cumulative stack. */
2538 fun->stack = max_stack;
2541 if (emit_stack_syms)
/* Optionally define an absolute __stack_* symbol recording the
   cumulative stack requirement for this function.  */
2543 struct spu_link_hash_table *htab = spu_hash_table (info);
2544 char *name = bfd_malloc (18 + strlen (f1));
2545 struct elf_link_hash_entry *h;
/* Globals get "__stack_NAME"; locals are disambiguated with the
   section id to avoid clashes between same-named statics.  */
2549 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
2550 sprintf (name, "__stack_%s", f1);
2552 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
2554 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
/* Only define the symbol if the user has not already done so.  */
2557 && (h->root.type == bfd_link_hash_new
2558 || h->root.type == bfd_link_hash_undefined
2559 || h->root.type == bfd_link_hash_undefweak))
2561 h->root.type = bfd_link_hash_defined;
2562 h->root.u.def.section = bfd_abs_section_ptr;
2563 h->root.u.def.value = max_stack;
2568 h->ref_regular_nonweak = 1;
2569 h->forced_local = 1;
2578 /* Provide an estimate of total stack required. */
2581 spu_elf_stack_analysis (bfd *output_bfd,
2582 struct bfd_link_info *info,
2583 int emit_stack_syms)
2586 bfd_vma max_stack = 0;
/* Build the function tables and call graph first; either step
   failing aborts the analysis.  */
2588 if (!discover_functions (output_bfd, info))
2591 if (!build_call_tree (output_bfd, info))
2594 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
2595 info->callbacks->minfo (_("\nStack size for functions. "
2596 "Annotations: '*' max stack, 't' tail call\n"));
/* Sum stack from every call-graph root, tracking the overall max.  */
2597 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2599 extern const bfd_target bfd_elf32_spu_vec;
2602 if (ibfd->xvec != &bfd_elf32_spu_vec)
2605 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2607 struct _spu_elf_section_data *sec_data;
2608 struct spu_elf_stack_info *sinfo;
2610 if ((sec_data = spu_elf_section_data (sec)) != NULL
2611 && (sinfo = sec_data->u.i.stack_info) != NULL)
2614 for (i = 0; i < sinfo->num_fun; ++i)
2616 if (!sinfo->fun[i].non_root)
2621 stack = sum_stack (&sinfo->fun[i], info,
2623 f1 = func_name (&sinfo->fun[i]);
2624 info->callbacks->info (_(" %s: 0x%v\n"),
2626 if (max_stack < stack)
2634 info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
2638 /* Perform a final link. */
2641 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2643 struct spu_link_hash_table *htab = spu_hash_table (info);
/* Run the optional stack analysis before the generic final link;
   a failure is reported but the link itself still proceeds.  */
2645 if (htab->stack_analysis
2646 && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
2647 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2649 return bfd_elf_final_link (output_bfd, info);
2652 /* Called when not normally emitting relocs, ie. !info->relocatable
2653 and !info->emitrelocations. Returns a count of special relocs
2654 that need to be emitted. */
2657 spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
2659 unsigned int count = 0;
2660 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
/* Only PPU-side relocs survive into the final output; count them.  */
2662 for (; relocs < relend; relocs++)
2664 int r_type = ELF32_R_TYPE (relocs->r_info);
2665 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2672 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
/* NOTE(review): intermediate lines are missing from this extract,
   including some parameter declarations and braces.  */
2675 spu_elf_relocate_section (bfd *output_bfd,
2676 struct bfd_link_info *info,
2678 asection *input_section,
2680 Elf_Internal_Rela *relocs,
2681 Elf_Internal_Sym *local_syms,
2682 asection **local_sections)
2684 Elf_Internal_Shdr *symtab_hdr;
2685 struct elf_link_hash_entry **sym_hashes;
2686 Elf_Internal_Rela *rel, *relend;
2687 struct spu_link_hash_table *htab;
2689 bfd_boolean emit_these_relocs = FALSE;
2691 htab = spu_hash_table (info);
2692 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2693 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
2696 relend = relocs + input_section->reloc_count;
2697 for (; rel < relend; rel++)
2700 reloc_howto_type *howto;
2701 unsigned long r_symndx;
2702 Elf_Internal_Sym *sym;
2704 struct elf_link_hash_entry *h;
2705 const char *sym_name;
2708 bfd_reloc_status_type r;
2709 bfd_boolean unresolved_reloc;
2712 r_symndx = ELF32_R_SYM (rel->r_info);
2713 r_type = ELF32_R_TYPE (rel->r_info);
/* PPU relocs are not applied here; they are emitted for the PPU
   side to process (see the reloc-squashing code below).  */
2714 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2716 emit_these_relocs = TRUE;
2720 howto = elf_howto_table + r_type;
2721 unresolved_reloc = FALSE;
/* Resolve the symbol: local syms by index, globals via the hash
   table through RELOC_FOR_GLOBAL_SYMBOL.  */
2726 if (r_symndx < symtab_hdr->sh_info)
2728 sym = local_syms + r_symndx;
2729 sec = local_sections[r_symndx];
2730 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
2731 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2735 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2736 r_symndx, symtab_hdr, sym_hashes,
2738 unresolved_reloc, warned);
2739 sym_name = h->root.root.string;
2742 if (sec != NULL && elf_discarded_section (sec))
2744 /* For relocs against symbols from removed linkonce sections,
2745 or sections discarded by a linker script, we just want the
2746 section contents zeroed. Avoid any special processing. */
2747 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
2753 if (info->relocatable)
2756 if (unresolved_reloc)
2758 (*_bfd_error_handler)
2759 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
2761 bfd_get_section_name (input_bfd, input_section),
2762 (long) rel->r_offset,
2768 /* If this symbol is in an overlay area, we may need to relocate
2769 to the overlay stub. */
2770 addend = rel->r_addend;
2771 if (htab->stub_sec != NULL
2773 && sec->output_section != NULL
2774 && sec->output_section->owner == output_bfd
/* The overlay manager entry points themselves never go via stubs.  */
2776 || (h != htab->ovly_load && h != htab->ovly_return)))
2779 unsigned int sym_type;
/* Only 16-bit relocs on actual branch/hint insns count as branches.  */
2782 if (r_type == R_SPU_REL16
2783 || r_type == R_SPU_ADDR16)
2784 branch = (is_branch (contents + rel->r_offset)
2785 || is_hint (contents + rel->r_offset));
2790 sym_type = ELF_ST_TYPE (sym->st_info);
2792 if ((sym_type == STT_FUNC || branch)
2793 && needs_ovl_stub (sym_name, sec, input_section, htab, branch))
2795 unsigned int ovl = 0;
2796 struct got_entry *g, **head;
/* Calls from within an overlay use that overlay's own stub.  */
2799 ovl = (spu_elf_section_data (input_section->output_section)
2803 head = &h->got.glist;
2805 head = elf_local_got_ents (input_bfd) + r_symndx;
/* Find the stub matching this addend and overlay (ovl 0 entries
   are usable from anywhere).  */
2807 for (g = *head; g != NULL; g = g->next)
2808 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
/* Redirect the reloc to the stub's address.  */
2813 relocation = g->stub_addr;
2818 r = _bfd_final_link_relocate (howto,
2822 rel->r_offset, relocation, addend);
2824 if (r != bfd_reloc_ok)
2826 const char *msg = (const char *) 0;
2830 case bfd_reloc_overflow:
2831 if (!((*info->callbacks->reloc_overflow)
2832 (info, (h ? &h->root : NULL), sym_name, howto->name,
2833 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
2837 case bfd_reloc_undefined:
2838 if (!((*info->callbacks->undefined_symbol)
2839 (info, sym_name, input_bfd, input_section,
2840 rel->r_offset, TRUE)))
2844 case bfd_reloc_outofrange:
2845 msg = _("internal error: out of range error");
2848 case bfd_reloc_notsupported:
2849 msg = _("internal error: unsupported relocation error");
2852 case bfd_reloc_dangerous:
2853 msg = _("internal error: dangerous error");
2857 msg = _("internal error: unknown error");
2862 if (!((*info->callbacks->warning)
2863 (info, msg, sym_name, input_bfd, input_section,
/* When only PPU relocs remain and we are not emitting relocs
   normally, squash the reloc array down to just those entries.  */
2872 && emit_these_relocs
2873 && !info->relocatable
2874 && !info->emitrelocations)
2876 Elf_Internal_Rela *wrel;
2877 Elf_Internal_Shdr *rel_hdr;
2879 wrel = rel = relocs;
2880 relend = relocs + input_section->reloc_count;
2881 for (; rel < relend; rel++)
2885 r_type = ELF32_R_TYPE (rel->r_info);
2886 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2889 input_section->reloc_count = wrel - relocs;
2890 /* Backflips for _bfd_elf_link_output_relocs. */
2891 rel_hdr = &elf_section_data (input_section)->rel_hdr;
2892 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
2899 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */

/* NOTE(review): the embedded original line numbers (2902, 2903, ..., 2908,
   2910, ...) are not consecutive, so this extract is missing lines -- the
   return type, braces, and at least one clause of the guarding condition.
   Do not edit from this view; comments describe only what is visible.  */

/* ELF backend link_output_symbol_hook.  On a final (non-relocatable) link
   that built overlay stubs, redirect any defined or weakly-defined symbol
   whose name starts with "_SPUEAR_" to its non-overlay stub: the got_entry
   with addend 0 and ovl 0 supplies the stub address, and the symbol is
   re-homed into stub_sec[0]'s output section.  */
2902 spu_elf_output_symbol_hook (struct bfd_link_info *info,
2903 const char *sym_name ATTRIBUTE_UNUSED,
2904 Elf_Internal_Sym *sym,
2905 asection *sym_sec ATTRIBUTE_UNUSED,
2906 struct elf_link_hash_entry *h)
2908 struct spu_link_hash_table *htab = spu_hash_table (info);
2910 if (!info->relocatable
2911 && htab->stub_sec != NULL
2913 && (h->root.type == bfd_link_hash_defined
2914 || h->root.type == bfd_link_hash_defweak)
     /* _SPUEAR_ is the prefix used for SPU entry-point symbols.  */
2916 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
2918 struct got_entry *g;
     /* Find the stub generated for the bare symbol (no addend) outside
	any overlay (ovl == 0).  */
2920 for (g = h->got.glist; g != NULL; g = g->next)
2921 if (g->addend == 0 && g->ovl == 0)
     /* Convert the stub's output section to an ELF section index so the
	emitted symbol points into the stub section.  */
2923 sym->st_shndx = (_bfd_elf_section_from_bfd_section
2924 (htab->stub_sec[0]->output_section->owner,
2925 htab->stub_sec[0]->output_section));
2926 sym->st_value = g->stub_addr;
/* Nonzero when the output is being linked as a PPU "plugin"; consulted by
   spu_elf_post_process_headers below (presumably set from the linker
   command line -- the setter's body is missing from this extract).  */
2934 static int spu_plugin = 0;

/* NOTE(review): the function body (original lines 2938-2940) is missing
   from this extract; by its name and the flag above it most likely just
   assigns VAL to spu_plugin -- confirm against the full source.  */
2937 spu_elf_plugin (int val)
2942 /* Set ELF header e_type for plugins. */

/* NOTE(review): the embedded line numbers jump (2945, 2946, 2950, 2952),
   so the return type, opening brace, and the guard that checks spu_plugin
   are missing from this extract.  In the full source the e_type change is
   applied only when spu_plugin is set -- confirm before editing.  */
2945 spu_elf_post_process_headers (bfd *abfd,
2946 struct bfd_link_info *info ATTRIBUTE_UNUSED)
2950 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
     /* Mark plugin output as a shared object rather than ET_EXEC.  */
2952 i_ehdrp->e_type = ET_DYN;
2956 /* We may add an extra PT_LOAD segment for .toe. We also need extra
2957 segments for overlays. */

/* NOTE(review): lines are missing (2958-2959, 2964-2968, 2971+), including
   the declaration of `sec`, the increment of `extra` for .toe, and the
   final return.  Visible logic: start from one extra header per overlay,
   then account for a loadable .toe section.  */
2960 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
2962 struct spu_link_hash_table *htab = spu_hash_table (info);
     /* One extra program header per overlay segment.  */
2963 int extra = htab->num_overlays;
2969 sec = bfd_get_section_by_name (abfd, ".toe");
2970 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
2976 /* Remove .toe section from other PT_LOAD segments and put it in
2977 a segment of its own. Put overlays in separate segments too. */

/* NOTE(review): many lines are missing from this extract (gaps at 2981-2982,
   2984-2988, 2995, 2997-2998, 3000, 3004-3005, 3010-3016, 3019-3020, 3022,
   3024+) -- among them the declarations of `i`, `s`, `toe`, `amt`, the NULL
   checks after bfd_zalloc, and the list re-linking.  Comments describe only
   the visible splitting logic; do not edit from this view.  */
2980 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
2983 struct elf_segment_map *m;
2989 toe = bfd_get_section_by_name (abfd, ".toe");
     /* Scan every multi-section PT_LOAD for .toe or an overlay section.  */
2990 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2991 if (m->p_type == PT_LOAD && m->count > 1)
2992 for (i = 0; i < m->count; i++)
2993 if ((s = m->sections[i]) == toe
2994 || spu_elf_section_data (s)->u.o.ovl_index != 0)
2996 struct elf_segment_map *m2;
     /* Sections after the split point get a new PT_LOAD of their own.  */
2999 if (i + 1 < m->count)
3001 amt = sizeof (struct elf_segment_map);
     /* elf_segment_map has one section slot built in; add space for the
	remaining (count - (i+2)) extra slots.  */
3002 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
3003 m2 = bfd_zalloc (abfd, amt);
3006 m2->count = m->count - (i + 1);
3007 memcpy (m2->sections, m->sections + i + 1,
3008 m2->count * sizeof (m->sections[0]));
3009 m2->p_type = PT_LOAD;
     /* The .toe/overlay section itself also gets a dedicated single-section
	PT_LOAD segment.  */
3017 amt = sizeof (struct elf_segment_map);
3018 m2 = bfd_zalloc (abfd, amt);
3021 m2->p_type = PT_LOAD;
3023 m2->sections[0] = s;
3033 /* Check that all loadable section VMAs lie in the range
3034 LO .. HI inclusive. */

/* NOTE(review): lines are missing (3035-3036, 3038, 3040-3041, 3050+),
   including the return type, the declaration of `i`, and the final
   "no offender" return.  Visible contract: walk every PT_LOAD segment
   and return the first non-empty section whose [vma, vma+size-1] falls
   outside [LO, HI].  */
3037 spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
3039 struct elf_segment_map *m;
3042 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
3043 if (m->p_type == PT_LOAD)
3044 for (i = 0; i < m->count; i++)
3045 if (m->sections[i]->size != 0
3046 && (m->sections[i]->vma < lo
3047 || m->sections[i]->vma > hi
     /* The "- 1" makes the end-of-section test inclusive, matching the
	"LO .. HI inclusive" contract above.  */
3048 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
3049 return m->sections[i];
3054 /* Tweak the section type of .note.spu_name. */

/* NOTE(review): the return type, the `sec` parameter line, braces, and the
   return statement (gaps at 3055-3056, 3059-3060, 3063+) are missing from
   this extract.  Visible logic: force SHT_NOTE on the SPU-name note section
   so it is emitted as a proper ELF note.  */
3057 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
3058 Elf_Internal_Shdr *hdr,
3061 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
3062 hdr->sh_type = SHT_NOTE;
3066 /* Tweak phdrs before writing them out. */

/* NOTE(review): this extract has large gaps (3067-3068, 3070, 3075-3080,
   3083, 3087, 3089-3090, 3092, 3094, 3097, 3099, 3102, 3105-3108, 3113,
   3116-3118, 3120-3121, 3123-3124, 3126-3127, 3131-3132, 3134-3136,
   3140-3142, 3145, 3148-3152) -- the phdr pointer setup, loop bodies,
   `last`/`i`/`o`/`adjust` bookkeeping, the `break` that aborts rounding
   when segments would overlap, and the returns are all absent.  Comments
   annotate only the visible statements; do not edit from this view.

   Two visible responsibilities:
   1. Mark overlay segments with PF_OVERLAY and record each overlay's file
      offset in the _ovly_table contents.
   2. Round PT_LOAD p_filesz/p_memsz up to multiples of 16 (the SPU DMA
      granularity), backing off if that would make segments overlap.  */
3069 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
3071 const struct elf_backend_data *bed;
3072 struct elf_obj_tdata *tdata;
3073 Elf_Internal_Phdr *phdr, *last;
3074 struct spu_link_hash_table *htab;
3081 bed = get_elf_backend_data (abfd);
3082 tdata = elf_tdata (abfd);
3084 count = tdata->program_header_size / bed->s->sizeof_phdr;
3085 htab = spu_hash_table (info);
3086 if (htab->num_overlays != 0)
3088 struct elf_segment_map *m;
     /* Walk segments in phdr order; `i` indexes the matching phdr entry.  */
3091 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
3093 && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
3095 /* Mark this as an overlay header. */
3096 phdr[i].p_flags |= PF_OVERLAY;
3098 if (htab->ovtab != NULL && htab->ovtab->size != 0)
3100 bfd_byte *p = htab->ovtab->contents;
     /* Each _ovly_table entry is 16 bytes; the file-offset field lives
	at byte 8 of entry `o` (entries are 1-based here).  */
3101 unsigned int off = o * 16 + 8;
3103 /* Write file_off into _ovly_table. */
3104 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
3109 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
3110 of 16. This should always be possible when using the standard
3111 linker scripts, but don't create overlapping segments if
3112 someone is playing games with linker scripts. */
     /* First pass: scan PT_LOAD phdrs from last to first, checking that
	the rounding would not run into the following segment.  */
3114 for (i = count; i-- != 0; )
3115 if (phdr[i].p_type == PT_LOAD)
     /* `-x & 15` is the distance up to the next multiple of 16.  */
3119 adjust = -phdr[i].p_filesz & 15;
3122 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
3125 adjust = -phdr[i].p_memsz & 15;
3128 && phdr[i].p_filesz != 0
3129 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
3130 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
3133 if (phdr[i].p_filesz != 0)
     /* Second pass: only if the scan above completed (i wrapped to -1,
	i.e. no overlap was detected) actually apply the rounding.  */
3137 if (i == (unsigned int) -1)
3138 for (i = count; i-- != 0; )
3139 if (phdr[i].p_type == PT_LOAD)
3143 adjust = -phdr[i].p_filesz & 15;
3144 phdr[i].p_filesz += adjust;
3146 adjust = -phdr[i].p_memsz & 15;
3147 phdr[i].p_memsz += adjust;
/* Target vector definition: these macros parameterize the generic ELF
   backend template pulled in by "elf32-target.h" below, wiring the SPU
   backend hooks defined in this file into bfd_elf32_spu_vec.  */
3153 #define TARGET_BIG_SYM bfd_elf32_spu_vec
3154 #define TARGET_BIG_NAME "elf32-spu"
3155 #define ELF_ARCH bfd_arch_spu
3156 #define ELF_MACHINE_CODE EM_SPU
3157 /* This matches the alignment need for DMA. */
3158 #define ELF_MAXPAGESIZE 0x80
3159 #define elf_backend_rela_normal 1
3160 #define elf_backend_can_gc_sections 1
3162 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
3163 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
3164 #define elf_info_to_howto spu_elf_info_to_howto
3165 #define elf_backend_count_relocs spu_elf_count_relocs
3166 #define elf_backend_relocate_section spu_elf_relocate_section
3167 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
3168 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
3169 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
3170 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
3172 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
3173 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
3174 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
3175 #define elf_backend_post_process_headers spu_elf_post_process_headers
3176 #define elf_backend_fake_sections spu_elf_fake_sections
3177 #define elf_backend_special_sections spu_elf_special_sections
3178 #define bfd_elf32_bfd_final_link spu_elf_final_link
/* Instantiate the generic 32-bit big-endian ELF target using the macros
   defined above.  */
3180 #include "elf32-target.h"