1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
27 #include "elf32-spu.h"
29 /* We use RELA style relocs. Don't define USE_REL. */
31 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
/* NOTE(review): this listing is a sampled excerpt; the stray leading
   numerals are original file line numbers from the extraction, and some
   source lines between entries are missing — verify against the full
   elf32-spu.c before editing.  */
35 /* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
38 static reloc_howto_type elf_howto_table[] = {
39 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
40 bfd_elf_generic_reloc, "SPU_NONE",
41 FALSE, 0, 0x00000000, FALSE),
42 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
43 bfd_elf_generic_reloc, "SPU_ADDR10",
44 FALSE, 0, 0x00ffc000, FALSE),
45 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
46 bfd_elf_generic_reloc, "SPU_ADDR16",
47 FALSE, 0, 0x007fff80, FALSE),
48 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
49 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
50 FALSE, 0, 0x007fff80, FALSE),
51 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
52 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
53 FALSE, 0, 0x007fff80, FALSE),
54 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
55 bfd_elf_generic_reloc, "SPU_ADDR18",
56 FALSE, 0, 0x01ffff80, FALSE),
57 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "SPU_ADDR32",
59 FALSE, 0, 0xffffffff, FALSE),
60 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "SPU_REL16",
62 FALSE, 0, 0x007fff80, TRUE),
63 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
64 bfd_elf_generic_reloc, "SPU_ADDR7",
65 FALSE, 0, 0x001fc000, FALSE),
/* The two 9-bit pc-relative relocs use a custom apply function because
   their value is split across non-contiguous instruction fields.  */
66 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
67 spu_elf_rel9, "SPU_REL9",
68 FALSE, 0, 0x0180007f, TRUE),
69 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
70 spu_elf_rel9, "SPU_REL9I",
71 FALSE, 0, 0x0000c07f, TRUE),
72 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
73 bfd_elf_generic_reloc, "SPU_ADDR10I",
74 FALSE, 0, 0x00ffc000, FALSE),
75 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
76 bfd_elf_generic_reloc, "SPU_ADDR16I",
77 FALSE, 0, 0x007fff80, FALSE),
78 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
79 bfd_elf_generic_reloc, "SPU_REL32",
80 FALSE, 0, 0xffffffff, TRUE),
81 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
82 bfd_elf_generic_reloc, "SPU_PPU32",
83 FALSE, 0, 0xffffffff, FALSE),
/* NOTE(review): the final R_SPU_PPU64 entry is truncated here — its
   closing arguments and the table's "};" are missing from this excerpt.  */
84 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
85 bfd_elf_generic_reloc, "SPU_PPU64",
/* Sections the SPU backend treats specially: .toe lives in the
   "table of effective addresses" region.  NOTE(review): the
   NULL-terminator entry and closing "};" are missing from this excerpt.  */
89 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
90 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
/* Map a generic BFD reloc code onto the SPU-specific reloc number.
   NOTE(review): many "return R_SPU_*;" lines (and the switch header,
   default case and closing braces) are missing from this excerpt —
   only some case labels survive.  */
94 static enum elf_spu_reloc_type
95 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
101 case BFD_RELOC_SPU_IMM10W:
103 case BFD_RELOC_SPU_IMM16W:
105 case BFD_RELOC_SPU_LO16:
106 return R_SPU_ADDR16_LO;
107 case BFD_RELOC_SPU_HI16:
108 return R_SPU_ADDR16_HI;
109 case BFD_RELOC_SPU_IMM18:
111 case BFD_RELOC_SPU_PCREL16:
113 case BFD_RELOC_SPU_IMM7:
115 case BFD_RELOC_SPU_IMM8:
117 case BFD_RELOC_SPU_PCREL9a:
119 case BFD_RELOC_SPU_PCREL9b:
121 case BFD_RELOC_SPU_IMM10:
122 return R_SPU_ADDR10I;
123 case BFD_RELOC_SPU_IMM16:
124 return R_SPU_ADDR16I;
127 case BFD_RELOC_32_PCREL:
129 case BFD_RELOC_SPU_PPU32:
131 case BFD_RELOC_SPU_PPU64:
/* Standard ELF backend hook: fill in CACHE_PTR->howto from the reloc
   type encoded in DST->r_info.  NOTE(review): the return type line,
   the cache_ptr parameter line and braces are missing from this excerpt.  */
137 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
139 Elf_Internal_Rela *dst)
141 enum elf_spu_reloc_type r_type;
143 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
/* Guard against corrupt input: the table only has R_SPU_max entries.  */
144 BFD_ASSERT (r_type < R_SPU_max);
145 cache_ptr->howto = &elf_howto_table[(int) r_type];
/* Backend hook: translate a BFD reloc CODE into a howto entry, or
   NULL if unsupported.  NOTE(review): the body's braces and the
   "return NULL;" path are missing from this excerpt.  */
148 static reloc_howto_type *
149 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
150 bfd_reloc_code_real_type code)
152 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
/* R_SPU_NONE doubles as the "no mapping" sentinel here.  */
154 if (r_type == R_SPU_NONE)
157 return elf_howto_table + r_type;
/* Backend hook: find a howto entry by its name, case-insensitively.
   NOTE(review): the r_name parameter line, the declaration of i, and
   the trailing "return NULL;" are missing from this excerpt.  */
160 static reloc_howto_type *
161 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
166 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
167 if (elf_howto_table[i].name != NULL
168 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
169 return &elf_howto_table[i];
174 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
176 static bfd_reloc_status_type
177 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
178 void *data, asection *input_section,
179 bfd *output_bfd, char **error_message)
/* NOTE(review): several declarations (val, insn) and statements are
   missing from this excerpt; verify against the full source.  */
181 bfd_size_type octets;
185 /* If this is a relocatable link (output_bfd test tells us), just
186 call the generic function. Any adjustment will be done at final
188 if (output_bfd != NULL)
189 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
190 input_section, output_bfd, error_message);
192 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
193 return bfd_reloc_outofrange;
194 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
196 /* Get symbol value. */
198 if (!bfd_is_com_section (symbol->section))
200 if (symbol->section->output_section)
201 val += symbol->section->output_section->vma;
203 val += reloc_entry->addend;
205 /* Make it pc-relative. */
206 val -= input_section->output_section->vma + input_section->output_offset;
/* Signed 9-bit range check: val must be in [-256, 255].  */
209 if (val + 256 >= 512)
210 return bfd_reloc_overflow;
212 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
214 /* Move two high bits of value to REL9I and REL9 position.
215 The mask will take care of selecting the right field. */
216 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
217 insn &= ~reloc_entry->howto->dst_mask;
218 insn |= val & reloc_entry->howto->dst_mask;
219 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
/* Backend new-section hook: attach SPU-specific per-section data
   (struct _spu_elf_section_data) before chaining to the generic ELF
   hook.  NOTE(review): the return type line, NULL check on the
   bfd_zalloc result, and braces are missing from this excerpt.  */
224 spu_elf_new_section_hook (bfd *abfd, asection *sec)
226 if (!sec->used_by_bfd)
228 struct _spu_elf_section_data *sdata;
230 sdata = bfd_zalloc (abfd, sizeof (*sdata));
233 sec->used_by_bfd = sdata;
236 return _bfd_elf_new_section_hook (abfd, sec);
239 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
240 strip --strip-unneeded will not remove them. */
/* NOTE(review): the function's return type line and braces are missing
   from this excerpt.  */
243 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
245 if (sym->name != NULL
246 && sym->section != bfd_abs_section_ptr
247 && strncmp (sym->name, "_EAR_", 5) == 0)
248 sym->flags |= BSF_KEEP;
251 /* SPU ELF linker hash table. */
/* Extends the generic ELF linker hash table with SPU overlay-stub
   state.  NOTE(review): several members visible in the full source
   (e.g. the stub/ovtab section pointers) are missing from this excerpt.  */
253 struct spu_link_hash_table
255 struct elf_link_hash_table elf;
257 /* The stub hash table. */
258 struct bfd_hash_table stub_hash_table;
260 /* Shortcuts to overlay sections. */
/* Entry symbol of the overlay manager's load routine (__ovly_load).  */
264 struct elf_link_hash_entry *ovly_load;
266 /* An array of two output sections per overlay region, chosen such that
267 the first section vma is the overlay buffer vma (ie. the section has
268 the lowest vma in the group that occupy the region), and the second
269 section vma+size specifies the end of the region. We keep pointers
270 to sections like this because section vmas may change when laying
272 asection **ovl_region;
274 /* Number of overlay buffers. */
275 unsigned int num_buf;
277 /* Total number of overlays. */
278 unsigned int num_overlays;
280 /* Set if we should emit symbols for stubs. */
281 unsigned int emit_stub_syms:1;
283 /* Set if we want stubs on calls out of overlay regions to
284 non-overlay regions. */
285 unsigned int non_overlay_stubs : 1;
/* Set by write_one_stub when a branch to __ovly_load won't reach.  */
288 unsigned int stub_overflow : 1;
290 /* Set if stack size analysis should be done. */
291 unsigned int stack_analysis : 1;
293 /* Set if __stack_* syms will be emitted. */
294 unsigned int emit_stack_syms : 1;
/* Downcast a generic link_info hash table to the SPU variant.  */
297 #define spu_hash_table(p) \
298 ((struct spu_link_hash_table *) ((p)->hash))
/* One overlay call stub, keyed by target symbol/section+addend.
   NOTE(review): the target_off, off and delta member declarations are
   missing from this excerpt — only their comments survive.  */
300 struct spu_stub_hash_entry
302 struct bfd_hash_entry root;
304 /* Destination of this stub. */
305 asection *target_section;
308 /* Offset of entry in stub section. */
311 /* Offset from this stub to stub that loads the overlay index. */
315 /* Create an entry in a spu stub hash table. */
/* NOTE(review): the string parameter line, NULL checks, and the field
   initialisation after target_section are missing from this excerpt.  */
317 static struct bfd_hash_entry *
318 stub_hash_newfunc (struct bfd_hash_entry *entry,
319 struct bfd_hash_table *table,
322 /* Allocate the structure if it has not already been allocated by a
326 entry = bfd_hash_allocate (table, sizeof (struct spu_stub_hash_entry));
331 /* Call the allocation method of the superclass. */
332 entry = bfd_hash_newfunc (entry, table, string);
335 struct spu_stub_hash_entry *sh = (struct spu_stub_hash_entry *) entry;
337 sh->target_section = NULL;
346 /* Create a spu ELF linker hash table. */
/* NOTE(review): NULL checks after bfd_malloc and the failure cleanup
   paths are missing from this excerpt.  */
348 static struct bfd_link_hash_table *
349 spu_elf_link_hash_table_create (bfd *abfd)
351 struct spu_link_hash_table *htab;
353 htab = bfd_malloc (sizeof (*htab));
357 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
358 _bfd_elf_link_hash_newfunc,
359 sizeof (struct elf_link_hash_entry)))
365 /* Init the stub hash table too. */
366 if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc,
367 sizeof (struct spu_stub_hash_entry)))
/* Zero everything from the stub member to the end of the struct in one
   go, relying on member layout — presumably "stub" is the first
   SPU-specific field after the tables; confirm against the full source.  */
370 memset (&htab->stub, 0,
371 sizeof (*htab) - offsetof (struct spu_link_hash_table, stub));
373 return &htab->elf.root;
376 /* Free the derived linker hash table. */
/* Releases the stub hash table before freeing the generic table.
   NOTE(review): the return type line and braces are missing from this
   excerpt.  */
379 spu_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
381 struct spu_link_hash_table *ret = (struct spu_link_hash_table *) hash;
383 bfd_hash_table_free (&ret->stub_hash_table);
384 _bfd_generic_link_hash_table_free (hash);
387 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
388 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
389 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
/* NOTE(review): the symsecp parameter, several assignments to *hp/*symp,
   NULL checks and return statements are missing from this excerpt.  */
392 get_sym_h (struct elf_link_hash_entry **hp,
393 Elf_Internal_Sym **symp,
395 Elf_Internal_Sym **locsymsp,
396 unsigned long r_symndx,
399 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
/* Indices >= sh_info are global symbols; below are locals.  */
401 if (r_symndx >= symtab_hdr->sh_info)
403 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
404 struct elf_link_hash_entry *h;
406 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
/* Follow indirect/warning links to the real symbol.  */
407 while (h->root.type == bfd_link_hash_indirect
408 || h->root.type == bfd_link_hash_warning)
409 h = (struct elf_link_hash_entry *) h->root.u.i.link;
419 asection *symsec = NULL;
420 if (h->root.type == bfd_link_hash_defined
421 || h->root.type == bfd_link_hash_defweak)
422 symsec = h->root.u.def.section;
428 Elf_Internal_Sym *sym;
429 Elf_Internal_Sym *locsyms = *locsymsp;
433 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
436 size_t symcount = symtab_hdr->sh_info;
438 /* If we are reading symbols into the contents, then
439 read the global syms too. This is done to cache
440 syms for later stack analysis. */
441 if ((unsigned char **) locsymsp == &symtab_hdr->contents)
442 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
443 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
450 sym = locsyms + r_symndx;
460 asection *symsec = NULL;
/* Only ordinary section indices map to a bfd section.  */
461 if ((sym->st_shndx != SHN_UNDEF
462 && sym->st_shndx < SHN_LORESERVE)
463 || sym->st_shndx > SHN_HIRESERVE)
464 symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
472 /* Build a name for an entry in the stub hash table. We can't use a
473 local symbol name because ld -r might generate duplicate local symbols. */
/* Returns a malloc'd name: "<sym>+<addend>" for globals or
   "<secid>:<symndx>+<addend>" for locals; the "+0" suffix is trimmed.
   NOTE(review): the return type, local declarations (len, stub_name),
   the h==NULL branch structure, returns and braces are missing from
   this excerpt.  */
476 spu_stub_name (const asection *sym_sec,
477 const struct elf_link_hash_entry *h,
478 const Elf_Internal_Rela *rel)
485 len = strlen (h->root.root.string) + 1 + 8 + 1;
486 stub_name = bfd_malloc (len);
487 if (stub_name == NULL)
490 sprintf (stub_name, "%s+%x",
492 (int) rel->r_addend & 0xffffffff);
497 len = 8 + 1 + 8 + 1 + 8 + 1;
498 stub_name = bfd_malloc (len);
499 if (stub_name == NULL)
502 sprintf (stub_name, "%x:%x+%x",
503 sym_sec->id & 0xffffffff,
504 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
505 (int) rel->r_addend & 0xffffffff);
506 len = strlen (stub_name);
/* Chop a trailing "+0" so a zero addend hashes the same as none.  */
509 if (stub_name[len - 2] == '+'
510 && stub_name[len - 1] == '0'
511 && stub_name[len] == 0)
512 stub_name[len - 2] = 0;
517 /* Create the note section if not already present. This is done early so
518 that the linker maps the sections to the right place in the output. */
/* NOTE(review): the return type, stack_analysis/emit_stack_syms
   parameter lines, local declarations (ibfd, flags, s, name_len, size,
   data), NULL checks and return statements are missing from this
   excerpt.  */
521 spu_elf_create_sections (bfd *output_bfd,
522 struct bfd_link_info *info,
527 struct spu_link_hash_table *htab = spu_hash_table (info);
529 /* Stash some options away where we can get at them later. */
530 htab->stack_analysis = stack_analysis;
531 htab->emit_stack_syms = emit_stack_syms;
/* If any input already carries the SPUNAME note, nothing to do.  */
533 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->next)
534 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
539 /* Make SPU_PTNOTE_SPUNAME section. */
546 ibfd = info->input_bfds;
547 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
548 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
550 || !bfd_set_section_alignment (ibfd, s, 4))
553 name_len = strlen (bfd_get_filename (output_bfd)) + 1;
/* ELF note layout: 12-byte header + padded name + padded descriptor.  */
554 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
555 size += (name_len + 3) & -4;
557 if (!bfd_set_section_size (ibfd, s, size))
560 data = bfd_zalloc (ibfd, size);
564 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
565 bfd_put_32 (ibfd, name_len, data + 4);
566 bfd_put_32 (ibfd, 1, data + 8);
567 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
568 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
569 bfd_get_filename (output_bfd), name_len);
576 /* qsort predicate to sort sections by vma. */
/* Ties on vma fall back to section index so the sort is stable.
   NOTE(review): the return type line and braces are missing from this
   excerpt.  */
579 sort_sections (const void *a, const void *b)
581 const asection *const *s1 = a;
582 const asection *const *s2 = b;
583 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
586 return delta < 0 ? -1 : 1;
588 return (*s1)->index - (*s2)->index;
591 /* Identify overlays in the output bfd, and number them. */
/* Sections with overlapping vmas are overlays; each distinct address
   range is an overlay buffer.  Fills htab->num_overlays, num_buf and
   ovl_region.  NOTE(review): the return type, declarations of s and
   ovl_end, several statements (including the num_buf increment and
   early returns) are missing from this excerpt.  */
594 spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
596 struct spu_link_hash_table *htab = spu_hash_table (info);
597 asection **alloc_sec;
598 unsigned int i, n, ovl_index, num_buf;
602 if (output_bfd->section_count < 2)
605 alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
606 if (alloc_sec == NULL)
609 /* Pick out all the alloced sections. */
610 for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
611 if ((s->flags & SEC_ALLOC) != 0
612 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
622 /* Sort them by vma. */
623 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections)
625 /* Look for overlapping vmas. Any with overlap must be overlays.
626 Count them. Also count the number of overlay regions and for
627 each region save a section from that region with the lowest vma
628 and another section with the highest end vma. */
629 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
630 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
633 if (s->vma < ovl_end)
635 asection *s0 = alloc_sec[i - 1];
/* First overlap in this buffer: number the preceding section too.  */
637 if (spu_elf_section_data (s0)->ovl_index == 0)
639 spu_elf_section_data (s0)->ovl_index = ++ovl_index;
640 alloc_sec[num_buf * 2] = s0;
641 alloc_sec[num_buf * 2 + 1] = s0;
644 spu_elf_section_data (s)->ovl_index = ++ovl_index;
645 if (ovl_end < s->vma + s->size)
647 ovl_end = s->vma + s->size;
648 alloc_sec[num_buf * 2 - 1] = s;
652 ovl_end = s->vma + s->size;
655 htab->num_overlays = ovl_index;
656 htab->num_buf = num_buf;
/* Shrink the array to just the per-buffer [start, end] section pairs.  */
663 alloc_sec = bfd_realloc (alloc_sec, num_buf * 2 * sizeof (*alloc_sec));
664 if (alloc_sec == NULL)
667 htab->ovl_region = alloc_sec;
/* Layout of the generated overlay stubs: each call stub is two insns
   (ila + br); each group additionally ends with an overlay-index stub
   (ila $78 + br __ovly_load).  NOTE(review): the second BR define for
   stub2 appears to be missing from this excerpt.  */
671 /* One of these per stub. */
672 #define SIZEOF_STUB1 8
673 #define ILA_79 0x4200004f /* ila $79,function_address */
674 #define BR 0x32000000 /* br stub2 */
676 /* One of these per overlay. */
677 #define SIZEOF_STUB2 8
678 #define ILA_78 0x4200004e /* ila $78,overlay_number */
680 #define NOP 0x40200000
682 /* Return true for all relative and absolute branch instructions.
690 brhnz 00100011 0.. */
/* Matches the common opcode bit pattern of SPU br/bra/brsl/brasl/brz/
   brnz/brhz/brhnz.  NOTE(review): the return type line, intermediate
   opcode-table comment lines and braces are missing from this excerpt.  */
693 is_branch (const unsigned char *insn)
695 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
698 /* Return true for branch hint instructions.
/* NOTE(review): the return type line and braces are missing from this
   excerpt.  */
703 is_hint (const unsigned char *insn)
705 return (insn[0] & 0xfc) == 0x10;
708 /* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
/* NOTE(review): the return type, sym_sec parameter line, several
   return TRUE/FALSE statements and the final is_branch test are
   missing from this excerpt.  */
711 needs_ovl_stub (const char *sym_name,
713 asection *input_section,
714 struct spu_link_hash_table *htab,
715 bfd_boolean is_branch)
717 if (htab->num_overlays == 0)
721 || sym_sec->output_section == NULL
722 || spu_elf_section_data (sym_sec->output_section) == NULL)
725 /* setjmp always goes via an overlay stub, because then the return
726 and hence the longjmp goes via __ovly_return. That magically
727 makes setjmp/longjmp between overlays work. */
728 if (strncmp (sym_name, "setjmp", 6) == 0
729 && (sym_name[6] == '\0' || sym_name[6] == '@'))
732 /* Usually, symbols in non-overlay sections don't need stubs. */
733 if (spu_elf_section_data (sym_sec->output_section)->ovl_index == 0
734 && !htab->non_overlay_stubs)
737 /* A reference from some other section to a symbol in an overlay
738 section needs a stub. */
739 if (spu_elf_section_data (sym_sec->output_section)->ovl_index
740 != spu_elf_section_data (input_section->output_section)->ovl_index)
743 /* If this insn isn't a branch then we are possibly taking the
744 address of a function and passing it out somehow. */
/* NOTE(review): these are members of a "struct stubarr" whose opening
   line and count member are missing from this excerpt.  It carries the
   stub hash table, a sortable array of stub entries, and a count used
   while sizing/placing stubs.  */
749 struct bfd_hash_table *stub_hash_table;
750 struct spu_stub_hash_entry **sh;
755 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
/* NOTE(review): the return type, free(stub_name), error handling,
   the count increment and return statements are missing from this
   excerpt.  */
759 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
761 /* Symbols starting with _SPUEAR_ need a stub because they may be
762 invoked by the PPU. */
763 if ((h->root.type == bfd_link_hash_defined
764 || h->root.type == bfd_link_hash_defweak)
766 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
768 struct stubarr *stubs = inf;
/* Zero-addend dummy reloc so spu_stub_name builds a plain "sym" name.  */
769 static Elf_Internal_Rela zero_rel;
770 char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
771 struct spu_stub_hash_entry *sh;
773 if (stub_name == NULL)
779 sh = (struct spu_stub_hash_entry *)
780 bfd_hash_lookup (stubs->stub_hash_table, stub_name, TRUE, FALSE);
787 /* If this entry isn't new, we already have a stub. */
788 if (sh->target_section != NULL)
794 sh->target_section = h->root.u.def.section;
795 sh->target_off = h->root.u.def.value;
802 /* Called via bfd_hash_traverse to set up pointers to all symbols
803 in the stub hash table. */
/* Fills stubs->sh back-to-front, decrementing stubs->count toward zero.
   NOTE(review): the return type line, braces and "return TRUE;" are
   missing from this excerpt.  */
806 populate_stubs (struct bfd_hash_entry *bh, void *inf)
808 struct stubarr *stubs = inf;
810 stubs->sh[--stubs->count] = (struct spu_stub_hash_entry *) bh;
814 /* qsort predicate to sort stubs by overlay number. */
/* Primary key: target overlay index; secondary: target address;
   final tie-break: stub name, so the order is fully deterministic.
   NOTE(review): the return type, declarations of i and d, several
   early returns and braces are missing from this excerpt.  */
817 sort_stubs (const void *a, const void *b)
819 const struct spu_stub_hash_entry *const *sa = a;
820 const struct spu_stub_hash_entry *const *sb = b;
824 i = spu_elf_section_data ((*sa)->target_section->output_section)->ovl_index;
825 i -= spu_elf_section_data ((*sb)->target_section->output_section)->ovl_index;
829 d = ((*sa)->target_section->output_section->vma
830 + (*sa)->target_section->output_offset
832 - (*sb)->target_section->output_section->vma
833 - (*sb)->target_section->output_offset
834 - (*sb)->target_off);
836 return d < 0 ? -1 : 1;
838 /* Two functions at the same address. Aliases perhaps. */
839 i = strcmp ((*sb)->root.string, (*sa)->root.string);
844 /* Allocate space for overlay call and return stubs. */
/* Walks every reloc in every input section, records which targets need
   overlay stubs, creates the .stub/.ovtab/.toe output sections, sorts
   the stubs and assigns each its offset and group delta.
   NOTE(review): this excerpt omits many lines — parameter lines
   (stub/ovtab/toe out-params, stack_analysis), local declarations
   (ibfd, section, flags, i, group), NULL checks, continue statements
   and closing braces.  Verify against the full source before editing.  */
847 spu_elf_size_stubs (bfd *output_bfd,
848 struct bfd_link_info *info,
849 int non_overlay_stubs,
855 struct spu_link_hash_table *htab = spu_hash_table (info);
857 struct stubarr stubs;
861 htab->non_overlay_stubs = non_overlay_stubs;
862 stubs.stub_hash_table = &htab->stub_hash_table;
865 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
867 extern const bfd_target bfd_elf32_spu_vec;
868 Elf_Internal_Shdr *symtab_hdr;
870 Elf_Internal_Sym *local_syms = NULL;
/* Skip inputs that aren't SPU ELF objects.  */
873 if (ibfd->xvec != &bfd_elf32_spu_vec)
876 /* We'll need the symbol table in a second. */
877 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
878 if (symtab_hdr->sh_info == 0)
881 /* Arrange to read and keep global syms for later stack analysis. */
884 psyms = &symtab_hdr->contents;
886 /* Walk over each section attached to the input bfd. */
887 for (section = ibfd->sections; section != NULL; section = section->next)
889 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
891 /* If there aren't any relocs, then there's nothing more to do. */
892 if ((section->flags & SEC_RELOC) == 0
893 || (section->flags & SEC_ALLOC) == 0
894 || (section->flags & SEC_LOAD) == 0
895 || section->reloc_count == 0)
898 /* If this section is a link-once section that will be
899 discarded, then don't create any stubs. */
900 if (section->output_section == NULL
901 || section->output_section->owner != output_bfd)
904 /* Get the relocs. */
906 = _bfd_elf_link_read_relocs (ibfd, section, NULL, NULL,
908 if (internal_relocs == NULL)
909 goto error_ret_free_local;
911 /* Now examine each relocation. */
912 irela = internal_relocs;
913 irelaend = irela + section->reloc_count;
914 for (; irela < irelaend; irela++)
916 enum elf_spu_reloc_type r_type;
919 Elf_Internal_Sym *sym;
920 struct elf_link_hash_entry *h;
921 const char *sym_name;
923 struct spu_stub_hash_entry *sh;
924 unsigned int sym_type;
925 enum _insn_type { non_branch, branch, call } insn_type;
927 r_type = ELF32_R_TYPE (irela->r_info);
928 r_indx = ELF32_R_SYM (irela->r_info);
930 if (r_type >= R_SPU_max)
932 bfd_set_error (bfd_error_bad_value);
933 goto error_ret_free_internal;
936 /* Determine the reloc target section. */
937 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
938 goto error_ret_free_internal;
941 || sym_sec->output_section == NULL
942 || sym_sec->output_section->owner != output_bfd)
945 /* Ensure no stubs for user supplied overlay manager syms. */
947 && (strcmp (h->root.root.string, "__ovly_load") == 0
948 || strcmp (h->root.root.string, "__ovly_return") == 0))
/* Only 16-bit branch-form relocs can be branches or hints; fetch
   the insn bytes to classify.  */
951 insn_type = non_branch;
952 if (r_type == R_SPU_REL16
953 || r_type == R_SPU_ADDR16)
955 unsigned char insn[4];
957 if (!bfd_get_section_contents (ibfd, section, insn,
959 goto error_ret_free_internal;
961 if (is_branch (insn) || is_hint (insn))
/* brsl/brasl (link forms) mark this as a call.  */
964 if ((insn[0] & 0xfd) == 0x31)
969 /* We are only interested in function symbols. */
973 sym_name = h->root.root.string;
977 sym_type = ELF_ST_TYPE (sym->st_info);
978 sym_name = bfd_elf_sym_name (sym_sec->owner,
983 if (sym_type != STT_FUNC)
985 /* It's common for people to write assembly and forget
986 to give function symbols the right type. Handle
987 calls to such symbols, but warn so that (hopefully)
988 people will fix their code. We need the symbol
989 type to be correct to distinguish function pointer
990 initialisation from other pointer initialisation. */
991 if (insn_type == call)
992 (*_bfd_error_handler) (_("warning: call to non-function"
993 " symbol %s defined in %B"),
994 sym_sec->owner, sym_name);
999 if (!needs_ovl_stub (sym_name, sym_sec, section, htab,
1000 insn_type != non_branch))
1003 stub_name = spu_stub_name (sym_sec, h, irela);
1004 if (stub_name == NULL)
1005 goto error_ret_free_internal;
1007 sh = (struct spu_stub_hash_entry *)
1008 bfd_hash_lookup (&htab->stub_hash_table, stub_name,
/* Shared failure-cleanup path for the reloc loop.  */
1013 error_ret_free_internal:
1014 if (elf_section_data (section)->relocs != internal_relocs)
1015 free (internal_relocs);
1016 error_ret_free_local:
1017 if (local_syms != NULL
1018 && (symtab_hdr->contents
1019 != (unsigned char *) local_syms))
1024 /* If this entry isn't new, we already have a stub. */
1025 if (sh->target_section != NULL)
1031 sh->target_section = sym_sec;
1033 sh->target_off = h->root.u.def.value;
1035 sh->target_off = sym->st_value;
1036 sh->target_off += irela->r_addend;
1041 /* We're done with the internal relocs, free them. */
1042 if (elf_section_data (section)->relocs != internal_relocs)
1043 free (internal_relocs);
1046 if (local_syms != NULL
1047 && symtab_hdr->contents != (unsigned char *) local_syms)
1049 if (!info->keep_memory)
/* Cache the syms for later stack analysis.  */
1052 symtab_hdr->contents = (unsigned char *) local_syms;
1056 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, &stubs);
1061 if (stubs.count == 0)
1064 ibfd = info->input_bfds;
1065 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1066 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1067 htab->stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1069 if (htab->stub == NULL
1070 || !bfd_set_section_alignment (ibfd, htab->stub, 2))
1073 flags = (SEC_ALLOC | SEC_LOAD
1074 | SEC_HAS_CONTENTS | SEC_IN_MEMORY)
1075 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1076 *ovtab = htab->ovtab;
/* NOTE(review): this aligns htab->stub, not htab->ovtab — looks like a
   copy/paste slip in the original; confirm against upstream before
   changing.  */
1077 if (htab->ovtab == NULL
1078 || !bfd_set_section_alignment (ibfd, htab->stub, 4))
1081 *toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1083 || !bfd_set_section_alignment (ibfd, *toe, 4))
1087 /* Retrieve all the stubs and sort. */
1088 stubs.sh = bfd_malloc (stubs.count * sizeof (*stubs.sh));
1089 if (stubs.sh == NULL)
1092 bfd_hash_traverse (&htab->stub_hash_table, populate_stubs, &stubs);
1093 BFD_ASSERT (stubs.count == 0);
1096 qsort (stubs.sh, stubs.count, sizeof (*stubs.sh), sort_stubs);
1098 /* Now that the stubs are sorted, place them in the stub section.
1099 Stubs are grouped per overlay
1113 for (i = 0; i < stubs.count; i++)
/* Crossing into a new overlay: close the previous group with a
   stub2 and record each member's delta to it.  */
1115 if (spu_elf_section_data (stubs.sh[group]->target_section
1116 ->output_section)->ovl_index
1117 != spu_elf_section_data (stubs.sh[i]->target_section
1118 ->output_section)->ovl_index)
1120 htab->stub->size += SIZEOF_STUB2;
1121 for (; group != i; group++)
1122 stubs.sh[group]->delta
1123 = stubs.sh[i - 1]->off - stubs.sh[group]->off;
/* Distinct target address: this stub needs its own slot;
   otherwise share the previous stub's offset.  */
1126 || ((stubs.sh[i - 1]->target_section->output_section->vma
1127 + stubs.sh[i - 1]->target_section->output_offset
1128 + stubs.sh[i - 1]->target_off)
1129 != (stubs.sh[i]->target_section->output_section->vma
1130 + stubs.sh[i]->target_section->output_offset
1131 + stubs.sh[i]->target_off)))
1133 stubs.sh[i]->off = htab->stub->size;
1134 htab->stub->size += SIZEOF_STUB1;
1137 stubs.sh[i]->off = stubs.sh[i - 1]->off;
/* Close the final group.  */
1140 htab->stub->size += SIZEOF_STUB2;
1141 for (; group != i; group++)
1142 stubs.sh[group]->delta = stubs.sh[i - 1]->off - stubs.sh[group]->off;
1144 /* htab->ovtab consists of two arrays.
1154 . } _ovly_buf_table[]; */
1156 htab->ovtab->alignment_power = 4;
1157 htab->ovtab->size = htab->num_overlays * 16 + htab->num_buf * 4;
1162 /* Functions to handle embedded spu_ovl.o object. */
/* iovec open callback for the built-in overlay manager: the stream
   pointer is the _ovl_stream itself.  NOTE(review): the return type
   line, braces and return statement are missing from this excerpt.  */
1165 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
/* iovec pread callback: copy COUNT bytes at OFFSET from the in-memory
   overlay manager image into BUF, clamped to the image size.
   NOTE(review): the return type, buf/count/offset parameter lines,
   declaration of max, the early-return value and the final return are
   missing from this excerpt.  */
1171 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1177 struct _ovl_stream *os;
1181 os = (struct _ovl_stream *) stream;
1182 max = (const char *) os->end - (const char *) os->start;
1184 if ((ufile_ptr) offset >= max)
1188 if (count > max - offset)
1189 count = max - offset;
1191 memcpy (buf, (const char *) os->start + offset, count);
/* Open the embedded overlay manager object via a BFD iovec backed by
   STREAM's in-memory image.  NOTE(review): the return type line and
   the remaining bfd_openr_iovec arguments (target, callbacks, stream)
   are missing from this excerpt.  */
1196 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1198 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1205 return *ovl_bfd != NULL;
1208 /* Fill in the ila and br for a stub. On the last stub for a group,
1209 write the stub that sets the overlay number too. */
/* bfd_hash_traverse callback.  NOTE(review): the return type,
   declarations (val, ovl, and the name-length/err locals), several
   statements and the final return are missing from this excerpt.  */
1212 write_one_stub (struct bfd_hash_entry *bh, void *inf)
1214 struct spu_stub_hash_entry *ent = (struct spu_stub_hash_entry *) bh;
1215 struct spu_link_hash_table *htab = inf;
1216 asection *sec = htab->stub;
1217 asection *s = ent->target_section;
/* stub1: ila $79,target ; br (to the group's stub2).  */
1221 val = ent->target_off + s->output_offset + s->output_section->vma;
1222 bfd_put_32 (sec->owner, ILA_79 + ((val << 7) & 0x01ffff80),
1223 sec->contents + ent->off);
1224 val = ent->delta + 4;
1225 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
1226 sec->contents + ent->off + 4);
1228 /* If this is the last stub of this group, write stub2. */
1229 if (ent->delta == 0)
/* delta==0: fall straight through a nop into stub2.  */
1231 bfd_put_32 (sec->owner, NOP,
1232 sec->contents + ent->off + 4);
1234 ovl = spu_elf_section_data (s->output_section)->ovl_index;
1235 bfd_put_32 (sec->owner, ILA_78 + ((ovl << 7) & 0x01ffff80),
1236 sec->contents + ent->off + 8);
/* PC-relative displacement from the br insn to __ovly_load.  */
1238 val = (htab->ovly_load->root.u.def.section->output_section->vma
1239 + htab->ovly_load->root.u.def.section->output_offset
1240 + htab->ovly_load->root.u.def.value
1241 - (sec->output_section->vma
1242 + sec->output_offset
/* br has an 18-bit signed word displacement; flag overflow for a
   diagnostic later rather than failing here.  */
1245 if (val + 0x20000 >= 0x40000)
1246 htab->stub_overflow = TRUE;
1248 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
1249 sec->contents + ent->off + 12);
1252 if (htab->emit_stub_syms)
1254 struct elf_link_hash_entry *h;
/* Synthesize a "00000000.ovl_call.<name>" local symbol at the stub.  */
1258 len1 = sizeof ("00000000.ovl_call.") - 1;
1259 len2 = strlen (ent->root.string);
1260 name = bfd_malloc (len1 + len2 + 1);
1263 memcpy (name, "00000000.ovl_call.", len1);
1264 memcpy (name + len1, ent->root.string, len2 + 1);
1265 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1269 if (h->root.type == bfd_link_hash_new)
1271 h->root.type = bfd_link_hash_defined;
1272 h->root.u.def.section = sec;
1273 h->root.u.def.value = ent->off;
1274 h->size = (ent->delta == 0
1275 ? SIZEOF_STUB1 + SIZEOF_STUB2 : SIZEOF_STUB1);
1279 h->ref_regular_nonweak = 1;
1280 h->forced_local = 1;
1288 /* Define an STT_OBJECT symbol. */
/* Look up NAME and define it in htab->ovtab unless the user already
   defined it, which is an error.  Returns the hash entry, or NULL on
   failure.  NOTE(review): the NULL check after lookup, the defweak
   half of the condition, intermediate flag settings, returns and
   braces are missing from this excerpt.  */
1290 static struct elf_link_hash_entry *
1291 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1293 struct elf_link_hash_entry *h;
1295 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1299 if (h->root.type != bfd_link_hash_defined
1302 h->root.type = bfd_link_hash_defined;
1303 h->root.u.def.section = htab->ovtab;
1304 h->type = STT_OBJECT;
1307 h->ref_regular_nonweak = 1;
/* User-supplied definition conflicts with the linker-generated one.  */
1312 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1313 h->root.u.def.section->owner,
1314 h->root.root.string);
1315 bfd_set_error (bfd_error_bad_value);
1322 /* Fill in all stubs and the overlay tables. */
/* Allocates and fills .stub contents, validates __ovly_load, writes
   _ovly_table and _ovly_buf_table, and defines the table symbols.
   NOTE(review): this excerpt omits local declarations (s, obfd, p, i),
   NULL/error checks, the binary-search loop structure in the buffer
   lookup, and several return statements.  */
1325 spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms, asection *toe)
1327 struct spu_link_hash_table *htab = spu_hash_table (info);
1328 struct elf_link_hash_entry *h;
1334 htab->emit_stub_syms = emit_syms;
1335 htab->stub->contents = bfd_zalloc (htab->stub->owner, htab->stub->size);
1336 if (htab->stub->contents == NULL)
1339 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1340 htab->ovly_load = h;
1341 BFD_ASSERT (h != NULL
1342 && (h->root.type == bfd_link_hash_defined
1343 || h->root.type == bfd_link_hash_defweak)
/* The overlay manager itself must not live in an overlay.  */
1346 s = h->root.u.def.section->output_section;
1347 if (spu_elf_section_data (s)->ovl_index)
1349 (*_bfd_error_handler) (_("%s in overlay section"),
1350 h->root.u.def.section->owner);
1351 bfd_set_error (bfd_error_bad_value);
1355 /* Write out all the stubs. */
1356 bfd_hash_traverse (&htab->stub_hash_table, write_one_stub, htab);
1358 if (htab->stub_overflow)
1360 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1361 bfd_set_error (bfd_error_bad_value);
1365 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1366 if (htab->ovtab->contents == NULL)
1369 /* Write out _ovly_table. */
1370 p = htab->ovtab->contents;
1371 obfd = htab->ovtab->output_section->owner;
1372 for (s = obfd->sections; s != NULL; s = s->next)
1374 unsigned int ovl_index = spu_elf_section_data (s)->ovl_index;
1378 unsigned int lo, hi, mid;
/* Each _ovly_table entry is 16 bytes: vma, size, file_off, buf.  */
1379 unsigned long off = (ovl_index - 1) * 16;
1380 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1381 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1382 /* file_off written later in spu_elf_modify_program_headers. */
/* Binary search ovl_region for the buffer containing s->vma.  */
1388 mid = (lo + hi) >> 1;
1389 if (htab->ovl_region[2 * mid + 1]->vma
1390 + htab->ovl_region[2 * mid + 1]->size <= s->vma)
1392 else if (htab->ovl_region[2 * mid]->vma > s->vma)
1396 bfd_put_32 (htab->ovtab->owner, mid + 1, p + off + 12);
1400 BFD_ASSERT (lo < hi);
1404 /* Write out _ovly_buf_table. */
1405 p = htab->ovtab->contents + htab->num_overlays * 16;
1406 for (i = 0; i < htab->num_buf; i++)
1408 bfd_put_32 (htab->ovtab->owner, 0, p);
1412 h = define_ovtab_symbol (htab, "_ovly_table");
1415 h->root.u.def.value = 0;
1416 h->size = htab->num_overlays * 16;
1418 h = define_ovtab_symbol (htab, "_ovly_table_end");
1421 h->root.u.def.value = htab->num_overlays * 16;
1424 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1427 h->root.u.def.value = htab->num_overlays * 16;
1428 h->size = htab->num_buf * 4;
1430 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1433 h->root.u.def.value = htab->num_overlays * 16 + htab->num_buf * 4;
/* _EAR_ marks the start of the table-of-effective-addresses section.  */
1436 h = define_ovtab_symbol (htab, "_EAR_");
1439 h->root.u.def.section = toe;
1440 h->root.u.def.value = 0;
1446 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1447 Search for stack adjusting insns, and return the sp delta. */
/* NOTE(review): elided listing; some statements between the numbered
   lines are missing.  Scans up to 32 unrecognised instructions while
   symbolically tracking register values through common SPU prologue
   insns.  */
1450 find_function_stack_adjust (asection *sec, bfd_vma offset)
/* reg[] tracks known register contents while scanning.  */
1455 memset (reg, 0, sizeof (reg));
1456 for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
1458 unsigned char buf[4];
1462 /* Assume no relocs on stack adjusing insns. */
1463 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
1466 if (buf[0] == 0x24 /* stqd */)
1470 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
1471 /* Partly decoded immediate field. */
1472 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
1474 if (buf[0] == 0x1c /* ai */)
/* Sign-extend the 10-bit immediate.  */
1477 imm = (imm ^ 0x200) - 0x200;
1478 reg[rt] = reg[ra] + imm;
/* Register 1 is the stack pointer; an "ai" writing it gives us
   the frame adjustment.  */
1480 if (rt == 1 /* sp */)
1487 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
1489 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
1491 reg[rt] = reg[ra] + reg[rb];
1495 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1497 if (buf[0] >= 0x42 /* ila */)
1498 imm |= (buf[0] & 1) << 17;
1503 if (buf[0] == 0x40 /* il */)
1505 if ((buf[1] & 0x80) == 0)
/* Sign-extend the 16-bit immediate for "il".  */
1507 imm = (imm ^ 0x8000) - 0x8000;
1509 else if ((buf[1] & 0x80) == 0 /* ilhu */)
1515 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
1517 reg[rt] |= imm & 0xffff;
1520 else if (buf[0] == 0x04 /* ori */)
1523 imm = (imm ^ 0x200) - 0x200;
1524 reg[rt] = reg[ra] | imm;
1527 else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
1528 || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
1530 /* Used in pic reg load. Say rt is trashed. */
1534 else if (is_branch (buf))
1535 /* If we hit a branch then we must be out of the prologue. */
1544 /* qsort predicate to sort symbols by section and value. */
/* Side arrays set up before the qsort call: the symbol array itself
   and a parallel array mapping each symbol to its section.  */
1546 static Elf_Internal_Sym *sort_syms_syms;
1547 static asection **sort_syms_psecs;
/* Compare two symbol pointers: by section index, then ascending value,
   then descending size, finally by original pointer order to make the
   sort stable.  NOTE(review): the guards around the early returns are
   elided in this listing.  */
1550 sort_syms (const void *a, const void *b)
1552 Elf_Internal_Sym *const *s1 = a;
1553 Elf_Internal_Sym *const *s2 = b;
1554 asection *sec1,*sec2;
1555 bfd_signed_vma delta;
1557 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1558 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1561 return sec1->index - sec2->index;
1563 delta = (*s1)->st_value - (*s2)->st_value;
1565 return delta < 0 ? -1 : 1;
/* Larger symbols sort first at equal addresses.  */
1567 delta = (*s2)->st_size - (*s1)->st_size;
1569 return delta < 0 ? -1 : 1;
1571 return *s1 < *s2 ? -1 : 1;
/* One entry in a function's callee list.  NOTE(review): the opening of
   the enclosing struct declaration is elided in this listing.  */
1576 struct function_info *fun;
1577 struct call_info *next;
/* Describes one contiguous address range belonging to a function;
   a function split into hot/cold parts has several of these.  */
1581 struct function_info
1583 /* List of functions called. Also branches to hot/cold part of
1585 struct call_info *call_list;
1586 /* For hot/cold part of function, point to owner. */
1587 struct function_info *start;
1588 /* Symbol at start of function. */
1590 Elf_Internal_Sym *sym;
1591 struct elf_link_hash_entry *h;
1593 /* Function section. */
1595 /* Address range of (this part of) function. */
1599 /* Set if global symbol. */
1600 unsigned int global : 1;
1601 /* Set if known to be start of function (as distinct from a hunk
1602 in hot/cold section. */
1603 unsigned int is_func : 1;
1604 /* Flags used during call tree traversal. */
1605 unsigned int visit1 : 1;
1606 unsigned int non_root : 1;
1607 unsigned int visit2 : 1;
1608 unsigned int marking : 1;
1609 unsigned int visit3 : 1;
/* Per-section array of function_info entries, kept sorted by address,
   hung off _spu_elf_section_data.  */
1612 struct spu_elf_stack_info
1616 /* Variable size array describing functions, one per contiguous
1617 address range belonging to a function. */
1618 struct function_info fun[1];
1621 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1622 entries for section SEC. */
1624 static struct spu_elf_stack_info *
1625 alloc_stack_info (asection *sec, int max_fun)
1627 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
/* The struct declaration already contains one function_info element,
   hence the "max_fun - 1" extra elements.  */
1630 amt = sizeof (struct spu_elf_stack_info);
1631 amt += (max_fun - 1) * sizeof (struct function_info);
1632 sec_data->stack_info = bfd_zmalloc (amt);
1633 if (sec_data->stack_info != NULL)
1634 sec_data->stack_info->max_fun = max_fun;
/* NULL on allocation failure.  */
1635 return sec_data->stack_info;
1638 /* Add a new struct function_info describing a (part of a) function
1639 starting at SYM_H. Keep the array sorted by address. */
/* SYM_H is either an Elf_Internal_Sym* (GLOBAL false) or an
   elf_link_hash_entry* (GLOBAL true).  IS_FUNC marks a known function
   start.  Returns the (possibly pre-existing) entry, or NULL on
   allocation failure.  NOTE(review): listing is elided; some guards
   between the numbered lines are not shown.  */
1641 static struct function_info *
1642 maybe_insert_function (asection *sec,
1645 bfd_boolean is_func)
1647 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1648 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
/* Lazily create the per-section array with room for 20 entries.  */
1654 sinfo = alloc_stack_info (sec, 20);
1661 Elf_Internal_Sym *sym = sym_h;
1662 off = sym->st_value;
1663 size = sym->st_size;
1667 struct elf_link_hash_entry *h = sym_h;
1668 off = h->root.u.def.value;
/* Find the insertion point; the array is sorted by .lo.  */
1672 for (i = sinfo->num_fun; --i >= 0; )
1673 if (sinfo->fun[i].lo <= off)
1678 /* Don't add another entry for an alias, but do update some
1680 if (sinfo->fun[i].lo == off)
1682 /* Prefer globals over local syms. */
1683 if (global && !sinfo->fun[i].global)
1685 sinfo->fun[i].global = TRUE;
1686 sinfo->fun[i].u.h = sym_h;
1689 sinfo->fun[i].is_func = TRUE;
1690 return &sinfo->fun[i];
1692 /* Ignore a zero-size symbol inside an existing function. */
1693 else if (sinfo->fun[i].hi > off && size == 0)
1694 return &sinfo->fun[i];
/* Shift later entries up to make room, or grow the array
   (by 20 plus half again) when it is full.  */
1697 if (++i < sinfo->num_fun)
1698 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1699 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1700 else if (i >= sinfo->max_fun)
1702 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1703 bfd_size_type old = amt;
1705 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
1706 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1707 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1708 sinfo = bfd_realloc (sinfo, amt);
1711 memset ((char *) sinfo + old, 0, amt - old);
1712 sec_data->stack_info = sinfo;
1714 sinfo->fun[i].is_func = is_func;
1715 sinfo->fun[i].global = global;
1716 sinfo->fun[i].sec = sec;
1718 sinfo->fun[i].u.h = sym_h;
1720 sinfo->fun[i].u.sym = sym_h;
1721 sinfo->fun[i].lo = off;
1722 sinfo->fun[i].hi = off + size;
/* Negated sp delta from the prologue scan, ie. bytes of local stack.  */
1723 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1724 sinfo->num_fun += 1;
1725 return &sinfo->fun[i];
1728 /* Return the name of FUN. */
/* For a hot/cold hunk, reports the owning function's name.  Nameless
   local syms get a malloc'd "section+offset" string (never freed;
   callers treat the result as read-only).  */
1731 func_name (struct function_info *fun)
1735 Elf_Internal_Shdr *symtab_hdr;
/* Walk up to the function that owns this hot/cold part.  */
1737 while (fun->start != NULL)
1741 return fun->u.h->root.root.string;
1744 if (fun->u.sym->st_name == 0)
1746 size_t len = strlen (sec->name);
/* '+' plus up to 8 hex digits plus NUL.  */
1747 char *name = bfd_malloc (len + 10);
1750 sprintf (name, "%s+%lx", sec->name,
1751 (unsigned long) fun->u.sym->st_value & 0xffffffff);
1755 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1756 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1759 /* Read the instruction at OFF in SEC. Return true iff the instruction
1760 is a nop, lnop, or stop 0 (all zero insn). */
1763 is_nop (asection *sec, bfd_vma off)
1765 unsigned char insn[4];
/* Out-of-range or unreadable insns are not nops.  */
1767 if (off + 4 > sec->size
1768 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
/* Matches both the nop (0x40200000) and lnop (0x00200000) patterns.  */
1770 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1772 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1777 /* Extend the range of FUN to cover nop padding up to LIMIT.
1778 Return TRUE iff some instruction other than a NOP was found. */
1781 insns_at_end (struct function_info *fun, bfd_vma limit)
/* Round the current end of function up to an insn boundary.  */
1783 bfd_vma off = (fun->hi + 3) & -4;
/* Skip over nop padding.  */
1785 while (off < limit && is_nop (fun->sec, off))
1796 /* Check and fix overlapping function ranges. Return TRUE iff there
1797 are gaps in the current info we have about functions in SEC. */
/* NOTE(review): listing is elided; the statements setting "gaps" in
   several branches are not shown.  */
1800 check_function_ranges (asection *sec, struct bfd_link_info *info)
1802 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1803 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1805 bfd_boolean gaps = FALSE;
/* Adjacent entries are sorted by address; clip any overlap.  */
1810 for (i = 1; i < sinfo->num_fun; i++)
1811 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1813 /* Fix overlapping symbols. */
1814 const char *f1 = func_name (&sinfo->fun[i - 1]);
1815 const char *f2 = func_name (&sinfo->fun[i]);
1817 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
1818 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
1820 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
1823 if (sinfo->num_fun == 0)
/* Also check for uncovered code before the first and after the
   last known function.  */
1827 if (sinfo->fun[0].lo != 0)
1829 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
1831 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
1833 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
1834 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
1836 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
1842 /* Search current function info for a function that contains address
1843 OFFSET in section SEC. */
1845 static struct function_info *
1846 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1848 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1849 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
/* Binary search the sorted fun[] array for a [lo,hi) range
   containing OFFSET.  */
1853 hi = sinfo->num_fun;
1856 mid = (lo + hi) / 2;
1857 if (offset < sinfo->fun[mid].lo)
1859 else if (offset >= sinfo->fun[mid].hi)
1862 return &sinfo->fun[mid];
/* Not found: report and fall through to the error path.  */
1864 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1869 /* Add CALLEE to CALLER call list if not already present. */
1872 insert_callee (struct function_info *caller, struct call_info *callee)
1874 struct call_info *p;
/* Check for an existing edge to the same function.  */
1875 for (p = caller->call_list; p != NULL; p = p->next)
1876 if (p->fun == callee->fun)
1878 /* Tail calls use less stack than normal calls. Retain entry
1879 for normal call over one for tail call. */
1880 if (p->is_tail > callee->is_tail)
1881 p->is_tail = callee->is_tail;
/* New edge: link at the head of the list.  */
1884 callee->next = caller->call_list;
1885 caller->call_list = callee;
1889 /* Rummage through the relocs for SEC, looking for function calls.
1890 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1891 mark destination symbols on calls as being functions. Also
1892 look at branches, which may be tail calls or go to hot/cold
1893 section part of same function. */
/* NOTE(review): listing is elided; error paths and some guards
   between the numbered lines are not shown.  */
1896 mark_functions_via_relocs (asection *sec,
1897 struct bfd_link_info *info,
1900 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1901 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1902 Elf_Internal_Sym *syms;
/* Warn about calls to non-code sections only once per link.  */
1904 static bfd_boolean warned;
1906 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
1908 if (internal_relocs == NULL)
1911 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1912 psyms = &symtab_hdr->contents;
1913 syms = *(Elf_Internal_Sym **) psyms;
1914 irela = internal_relocs;
1915 irelaend = irela + sec->reloc_count;
1916 for (; irela < irelaend; irela++)
1918 enum elf_spu_reloc_type r_type;
1919 unsigned int r_indx;
1921 Elf_Internal_Sym *sym;
1922 struct elf_link_hash_entry *h;
1924 unsigned char insn[4];
1925 bfd_boolean is_call;
1926 struct function_info *caller;
1927 struct call_info *callee;
/* Only 16-bit branch relocs can sit on branch insns.  */
1929 r_type = ELF32_R_TYPE (irela->r_info);
1930 if (r_type != R_SPU_REL16
1931 && r_type != R_SPU_ADDR16)
1934 r_indx = ELF32_R_SYM (irela->r_info);
1935 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
1939 || sym_sec->output_section == NULL
1940 || sym_sec->output_section->owner != sec->output_section->owner)
/* Fetch the insn under the reloc and check it really is a branch.  */
1943 if (!bfd_get_section_contents (sec->owner, sec, insn,
1944 irela->r_offset, 4))
1946 if (!is_branch (insn))
1949 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1950 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1954 if (!call_tree || !warned)
1955 info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
1956 " %B(%A), stack analysis incomplete\n"),
1957 sec->owner, sec, irela->r_offset,
1958 sym_sec->owner, sym_sec);
/* 0x31/0x33 opcodes (brasl/brsl) are calls; plain branches are
   potential tail calls.  */
1962 is_call = (insn[0] & 0xfd) == 0x31;
1965 val = h->root.u.def.value;
1967 val = sym->st_value;
1968 val += irela->r_addend;
/* First pass (!call_tree): record the branch target as a
   function entry.  */
1972 struct function_info *fun;
/* A non-zero addend means the target is not at the symbol
   itself; fake a local symbol at the target address.  */
1974 if (irela->r_addend != 0)
1976 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
1979 fake->st_value = val;
1981 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
1985 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
1987 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
1990 if (irela->r_addend != 0
1991 && fun->u.sym != sym)
/* Second pass (call_tree): add a caller->callee edge.  */
1996 caller = find_function (sec, irela->r_offset, info);
1999 callee = bfd_malloc (sizeof *callee);
2003 callee->fun = find_function (sym_sec, val, info);
2004 if (callee->fun == NULL)
2006 callee->is_tail = !is_call;
2007 if (!insert_callee (caller, callee))
2010 && !callee->fun->is_func
2011 && callee->fun->stack == 0)
2013 /* This is either a tail call or a branch from one part of
2014 the function to another, ie. hot/cold section. If the
2015 destination has been called by some other function then
2016 it is a separate function. We also assume that functions
2017 are not split across input files. */
2018 if (callee->fun->start != NULL
2019 || sec->owner != sym_sec->owner)
2021 callee->fun->start = NULL;
2022 callee->fun->is_func = TRUE;
2025 callee->fun->start = caller;
2032 /* Handle something like .init or .fini, which has a piece of a function.
2033 These sections are pasted together to form a single function. */
/* NOTE(review): listing is elided; error returns between the numbered
   lines are not shown.  */
2036 pasted_function (asection *sec, struct bfd_link_info *info)
2038 struct bfd_link_order *l;
2039 struct _spu_elf_section_data *sec_data;
2040 struct spu_elf_stack_info *sinfo;
2041 Elf_Internal_Sym *fake;
2042 struct function_info *fun, *fun_start;
/* Create a fake local symbol covering the whole section and install
   it as a function hunk.  */
2044 fake = bfd_zmalloc (sizeof (*fake));
2048 fake->st_size = sec->size;
2050 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2051 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2055 /* Find a function immediately preceding this section. */
/* Walk the output section's link_order list; the last function seen
   before SEC becomes this hunk's owner.  */
2057 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2059 if (l->u.indirect.section == sec)
2061 if (fun_start != NULL)
/* Chase up to the real function start if the predecessor is
   itself a pasted hunk.  */
2063 if (fun_start->start)
2064 fun_start = fun_start->start;
2065 fun->start = fun_start;
2069 if (l->type == bfd_indirect_link_order
2070 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2071 && (sinfo = sec_data->stack_info) != NULL
2072 && sinfo->num_fun != 0)
2073 fun_start = &sinfo->fun[sinfo->num_fun - 1];
/* SEC never appeared in the link_order list: error.  */
2076 info->callbacks->einfo (_("%A link_order not found\n"), sec);
2080 /* We're only interested in code sections. */
/* TRUE for a loadable code section that is part of the output OBFD
   and is not the stub section itself.  */
2083 interesting_section (asection *s, bfd *obfd, struct spu_link_hash_table *htab)
2085 return (s != htab->stub
2086 && s->output_section != NULL
2087 && s->output_section->owner == obfd
2088 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2089 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2093 /* Map address ranges in code sections to functions. */
/* NOTE(review): listing is elided; error paths and several guards
   between the numbered lines are not shown.  */
2096 discover_functions (bfd *output_bfd, struct bfd_link_info *info)
2098 struct spu_link_hash_table *htab = spu_hash_table (info);
2101 Elf_Internal_Sym ***psym_arr;
2102 asection ***sec_arr;
2103 bfd_boolean gaps = FALSE;
/* Count the input bfds, then allocate per-bfd arrays holding each
   bfd's selected symbols and their sections.  */
2106 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2109 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2110 if (psym_arr == NULL)
2112 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2113 if (sec_arr == NULL)
/* Pass 1: read symbols, pick out those in interesting code sections,
   sort them, and install properly typed function symbols.  */
2117 for (ibfd = info->input_bfds, bfd_idx = 0;
2119 ibfd = ibfd->link_next, bfd_idx++)
2121 extern const bfd_target bfd_elf32_spu_vec;
2122 Elf_Internal_Shdr *symtab_hdr;
2125 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2126 asection **psecs, **p;
2128 if (ibfd->xvec != &bfd_elf32_spu_vec)
2131 /* Read all the symbols. */
2132 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2133 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2137 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2140 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
/* Cache the symbols for later passes.  */
2142 symtab_hdr->contents = (void *) syms;
2147 /* Select defined function symbols that are going to be output. */
2148 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2151 psym_arr[bfd_idx] = psyms;
2152 psecs = bfd_malloc (symcount * sizeof (*psecs));
2155 sec_arr[bfd_idx] = psecs;
2156 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2157 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2158 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2162 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2163 if (s != NULL && interesting_section (s, output_bfd, htab))
2166 symcount = psy - psyms;
2169 /* Sort them by section and offset within section. */
2170 sort_syms_syms = syms;
2171 sort_syms_psecs = psecs;
2172 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2174 /* Now inspect the function symbols. */
2175 for (psy = psyms; psy < psyms + symcount; )
2177 asection *s = psecs[*psy - syms];
2178 Elf_Internal_Sym **psy2;
/* Count the symbols in this section so the stack_info array
   can be sized exactly.  */
2180 for (psy2 = psy; ++psy2 < psyms + symcount; )
2181 if (psecs[*psy2 - syms] != s)
2184 if (!alloc_stack_info (s, psy2 - psy))
2189 /* First install info about properly typed and sized functions.
2190 In an ideal world this will cover all code sections, except
2191 when partitioning functions into hot and cold sections,
2192 and the horrible pasted together .init and .fini functions. */
2193 for (psy = psyms; psy < psyms + symcount; ++psy)
2196 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2198 asection *s = psecs[sy - syms];
2199 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2204 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2205 if (interesting_section (sec, output_bfd, htab))
2206 gaps |= check_function_ranges (sec, info)
2211 /* See if we can discover more function symbols by looking at
/* Pass 2: use branch relocs to find function entries STT_FUNC
   symbols did not cover.  */
2213 for (ibfd = info->input_bfds, bfd_idx = 0;
2215 ibfd = ibfd->link_next, bfd_idx++)
2219 if (psym_arr[bfd_idx] == NULL)
2222 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2223 if (interesting_section (sec, output_bfd, htab)
2224 && sec->reloc_count != 0)
2226 if (!mark_functions_via_relocs (sec, info, FALSE))
/* Pass 3: install remaining globals as functions and close any
   remaining gaps.  */
2231 for (ibfd = info->input_bfds, bfd_idx = 0;
2233 ibfd = ibfd->link_next, bfd_idx++)
2235 Elf_Internal_Shdr *symtab_hdr;
2237 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2240 if ((psyms = psym_arr[bfd_idx]) == NULL)
2243 psecs = sec_arr[bfd_idx];
2245 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2246 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2249 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2250 if (interesting_section (sec, output_bfd, htab))
2251 gaps |= check_function_ranges (sec, info);
2255 /* Finally, install all globals. */
2256 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2260 s = psecs[sy - syms];
2262 /* Global syms might be improperly typed functions. */
2263 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2264 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2266 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2271 /* Some of the symbols we've installed as marking the
2272 beginning of functions may have a size of zero. Extend
2273 the range of such functions to the beginning of the
2274 next symbol of interest. */
2275 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2276 if (interesting_section (sec, output_bfd, htab))
2278 struct _spu_elf_section_data *sec_data;
2279 struct spu_elf_stack_info *sinfo;
2281 sec_data = spu_elf_section_data (sec);
2282 sinfo = sec_data->stack_info;
2286 bfd_vma hi = sec->size;
/* Walk backwards so each function's hi is clipped to the
   next function's lo.  */
2288 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2290 sinfo->fun[fun_idx].hi = hi;
2291 hi = sinfo->fun[fun_idx].lo;
2294 /* No symbols in this section. Must be .init or .fini
2295 or something similar. */
2296 else if (!pasted_function (sec, info))
/* Release the per-bfd scratch arrays.  */
2302 for (ibfd = info->input_bfds, bfd_idx = 0;
2304 ibfd = ibfd->link_next, bfd_idx++)
2306 if (psym_arr[bfd_idx] == NULL)
2309 free (psym_arr[bfd_idx]);
2310 free (sec_arr[bfd_idx]);
2319 /* Mark nodes in the call graph that are called by some other node. */
/* Depth-first walk; visit1 guards against revisiting a node.  */
2322 mark_non_root (struct function_info *fun)
2324 struct call_info *call;
2327 for (call = fun->call_list; call; call = call->next)
2329 call->fun->non_root = TRUE;
2330 if (!call->fun->visit1)
2331 mark_non_root (call->fun);
2335 /* Remove cycles from the call graph. */
/* Depth-first traversal: "marking" flags nodes on the current path,
   so an edge back to a marked node closes a cycle and is removed
   (with a diagnostic).  visit2 guards against re-traversal.  */
2338 call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
2340 struct call_info **callp, *call;
2343 fun->marking = TRUE;
2345 callp = &fun->call_list;
2346 while ((call = *callp) != NULL)
2348 if (!call->fun->visit2)
2349 call_graph_traverse (call->fun, info);
2350 else if (call->fun->marking)
2352 const char *f1 = func_name (fun);
2353 const char *f2 = func_name (call->fun);
2355 info->callbacks->info (_("Stack analysis will ignore the call "
/* Unlink the back edge.  */
2358 *callp = call->next;
2361 callp = &call->next;
2363 fun->marking = FALSE;
2366 /* Populate call_list for each function. */
/* NOTE(review): listing is elided; error returns between the numbered
   lines are not shown.  Three sweeps over all SPU input sections:
   build edges from relocs, fold hot/cold hunks into their owners,
   find roots, then break cycles starting from the roots.  */
2369 build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
2371 struct spu_link_hash_table *htab = spu_hash_table (info);
2374 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2376 extern const bfd_target bfd_elf32_spu_vec;
2379 if (ibfd->xvec != &bfd_elf32_spu_vec)
2382 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2384 if (!interesting_section (sec, output_bfd, htab)
2385 || sec->reloc_count == 0)
2388 if (!mark_functions_via_relocs (sec, info, TRUE))
2392 /* Transfer call info from hot/cold section part of function
2394 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2396 struct _spu_elf_section_data *sec_data;
2397 struct spu_elf_stack_info *sinfo;
2399 if ((sec_data = spu_elf_section_data (sec)) != NULL
2400 && (sinfo = sec_data->stack_info) != NULL)
2403 for (i = 0; i < sinfo->num_fun; ++i)
2405 if (sinfo->fun[i].start != NULL)
2407 struct call_info *call = sinfo->fun[i].call_list;
/* Move each edge onto the owning function's list.  */
2409 while (call != NULL)
2411 struct call_info *call_next = call->next;
2412 if (!insert_callee (sinfo->fun[i].start, call))
2416 sinfo->fun[i].call_list = NULL;
2417 sinfo->fun[i].non_root = TRUE;
2424 /* Find the call graph root(s). */
2425 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2427 extern const bfd_target bfd_elf32_spu_vec;
2430 if (ibfd->xvec != &bfd_elf32_spu_vec)
2433 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2435 struct _spu_elf_section_data *sec_data;
2436 struct spu_elf_stack_info *sinfo;
2438 if ((sec_data = spu_elf_section_data (sec)) != NULL
2439 && (sinfo = sec_data->stack_info) != NULL)
2442 for (i = 0; i < sinfo->num_fun; ++i)
2443 if (!sinfo->fun[i].visit1)
2444 mark_non_root (&sinfo->fun[i]);
2449 /* Remove cycles from the call graph. We start from the root node(s)
2450 so that we break cycles in a reasonable place. */
2451 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2453 extern const bfd_target bfd_elf32_spu_vec;
2456 if (ibfd->xvec != &bfd_elf32_spu_vec)
2459 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2461 struct _spu_elf_section_data *sec_data;
2462 struct spu_elf_stack_info *sinfo;
2464 if ((sec_data = spu_elf_section_data (sec)) != NULL
2465 && (sinfo = sec_data->stack_info) != NULL)
2468 for (i = 0; i < sinfo->num_fun; ++i)
2469 if (!sinfo->fun[i].non_root)
2470 call_graph_traverse (&sinfo->fun[i], info);
2478 /* Descend the call graph for FUN, accumulating total stack required. */
/* Recursive post-order walk: a function's cumulative stack is its own
   frame plus the worst case over its callees (a tail call does not add
   the caller's frame).  Optionally emits absolute __stack_* symbols
   recording each function's cumulative requirement.
   NOTE(review): listing is elided; some statements between the
   numbered lines are not shown.  */
2481 sum_stack (struct function_info *fun,
2482 struct bfd_link_info *info,
2483 int emit_stack_syms)
2485 struct call_info *call;
2486 struct function_info *max = NULL;
2487 bfd_vma max_stack = fun->stack;
2494 for (call = fun->call_list; call; call = call->next)
2496 stack = sum_stack (call->fun, info, emit_stack_syms);
2497 /* Include caller stack for normal calls, don't do so for
2498 tail calls. fun->stack here is local stack usage for
2501 stack += fun->stack;
/* Track the callee with the largest requirement.  */
2502 if (max_stack < stack)
2509 f1 = func_name (fun);
2510 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"), f1, fun->stack, max_stack);
2514 info->callbacks->minfo (_(" calls:\n"));
2515 for (call = fun->call_list; call; call = call->next)
2517 const char *f2 = func_name (call->fun);
/* '*' marks the max-stack callee, 't' marks a tail call.  */
2518 const char *ann1 = call->fun == max ? "*" : " ";
2519 const char *ann2 = call->is_tail ? "t" : " ";
2521 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
2525 /* Now fun->stack holds cumulative stack. */
2526 fun->stack = max_stack;
2529 if (emit_stack_syms)
2531 struct spu_link_hash_table *htab = spu_hash_table (info);
2532 char *name = bfd_malloc (18 + strlen (f1));
2533 struct elf_link_hash_entry *h;
/* Globals get "__stack_<name>"; locals are disambiguated with the
   section id.  */
2537 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
2538 sprintf (name, "__stack_%s", f1);
2540 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
2542 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
/* Only define the symbol if nothing else has defined it.  */
2545 && (h->root.type == bfd_link_hash_new
2546 || h->root.type == bfd_link_hash_undefined
2547 || h->root.type == bfd_link_hash_undefweak))
2549 h->root.type = bfd_link_hash_defined;
2550 h->root.u.def.section = bfd_abs_section_ptr;
2551 h->root.u.def.value = max_stack;
2556 h->ref_regular_nonweak = 1;
2557 h->forced_local = 1;
2566 /* Provide an estimate of total stack required. */
/* Driver: discover functions, build the call graph, then sum stack
   usage from each root node, reporting per-function figures and the
   overall maximum via the linker callbacks.  */
2569 spu_elf_stack_analysis (bfd *output_bfd,
2570 struct bfd_link_info *info,
2571 int emit_stack_syms)
2574 bfd_vma max_stack = 0;
2576 if (!discover_functions (output_bfd, info))
2579 if (!build_call_tree (output_bfd, info))
2582 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
2583 info->callbacks->minfo (_("\nStack size for functions. "
2584 "Annotations: '*' max stack, 't' tail call\n"));
2585 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2587 extern const bfd_target bfd_elf32_spu_vec;
2590 if (ibfd->xvec != &bfd_elf32_spu_vec)
2593 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2595 struct _spu_elf_section_data *sec_data;
2596 struct spu_elf_stack_info *sinfo;
2598 if ((sec_data = spu_elf_section_data (sec)) != NULL
2599 && (sinfo = sec_data->stack_info) != NULL)
2602 for (i = 0; i < sinfo->num_fun; ++i)
/* Only root nodes are summed; callees are reached through
   the recursion in sum_stack.  */
2604 if (!sinfo->fun[i].non_root)
2609 stack = sum_stack (&sinfo->fun[i], info,
2611 f1 = func_name (&sinfo->fun[i]);
2612 info->callbacks->info (_(" %s: 0x%v\n"),
2614 if (max_stack < stack)
2622 info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
2626 /* Perform a final link. */
/* Runs the optional stack analysis (reporting failure without
   aborting the link), then delegates to the generic ELF final link.  */
2629 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2631 struct spu_link_hash_table *htab = spu_hash_table (info);
2633 if (htab->stack_analysis
2634 && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
2635 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2637 return bfd_elf_final_link (output_bfd, info);
2640 /* Called when not normally emitting relocs, ie. !info->relocatable
2641 and !info->emitrelocations. Returns a count of special relocs
2642 that need to be emitted. */
/* Only R_SPU_PPU32/R_SPU_PPU64 relocs survive into the output.  */
2645 spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
2647 unsigned int count = 0;
2648 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
2650 for (; relocs < relend; relocs++)
2652 int r_type = ELF32_R_TYPE (relocs->r_info);
2653 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2660 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
/* NOTE(review): listing is elided; some statements between the
   numbered lines are not shown.  Standard ELF backend relocate_section
   hook, with two SPU twists: PPU relocs are left for the PPU-side
   linker (and compacted into the output at the end), and branch/hint
   relocs into overlays are redirected to their stubs.  */
2663 spu_elf_relocate_section (bfd *output_bfd,
2664 struct bfd_link_info *info,
2666 asection *input_section,
2668 Elf_Internal_Rela *relocs,
2669 Elf_Internal_Sym *local_syms,
2670 asection **local_sections)
2672 Elf_Internal_Shdr *symtab_hdr;
2673 struct elf_link_hash_entry **sym_hashes;
2674 Elf_Internal_Rela *rel, *relend;
2675 struct spu_link_hash_table *htab;
2676 bfd_boolean ret = TRUE;
2677 bfd_boolean emit_these_relocs = FALSE;
2679 htab = spu_hash_table (info);
2680 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2681 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
2684 relend = relocs + input_section->reloc_count;
2685 for (; rel < relend; rel++)
2688 reloc_howto_type *howto;
2689 unsigned long r_symndx;
2690 Elf_Internal_Sym *sym;
2692 struct elf_link_hash_entry *h;
2693 const char *sym_name;
2696 bfd_reloc_status_type r;
2697 bfd_boolean unresolved_reloc;
2701 r_symndx = ELF32_R_SYM (rel->r_info);
2702 r_type = ELF32_R_TYPE (rel->r_info);
/* PPU relocs are not applied here; they are emitted for the
   PPU-side link (see the compaction pass below).  */
2703 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2705 emit_these_relocs = TRUE;
2709 howto = elf_howto_table + r_type;
2710 unresolved_reloc = FALSE;
/* Resolve the symbol: local syms directly, globals through the
   hash table.  */
2715 if (r_symndx < symtab_hdr->sh_info)
2717 sym = local_syms + r_symndx;
2718 sec = local_sections[r_symndx];
2719 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
2720 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2724 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2725 r_symndx, symtab_hdr, sym_hashes,
2727 unresolved_reloc, warned);
2728 sym_name = h->root.root.string;
2731 if (sec != NULL && elf_discarded_section (sec))
2733 /* For relocs against symbols from removed linkonce sections,
2734 or sections discarded by a linker script, we just want the
2735 section contents zeroed. Avoid any special processing. */
2736 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
2742 if (info->relocatable)
2745 if (unresolved_reloc)
2747 (*_bfd_error_handler)
2748 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
2750 bfd_get_section_name (input_bfd, input_section),
2751 (long) rel->r_offset,
2757 /* If this symbol is in an overlay area, we may need to relocate
2758 to the overlay stub. */
2759 addend = rel->r_addend;
2760 branch = (is_branch (contents + rel->r_offset)
2761 || is_hint (contents + rel->r_offset));
2762 if (needs_ovl_stub (sym_name, sec, input_section, htab, branch))
2765 struct spu_stub_hash_entry *sh;
2767 stub_name = spu_stub_name (sec, h, rel);
2768 if (stub_name == NULL)
2771 sh = (struct spu_stub_hash_entry *)
2772 bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
/* Redirect the reloc to the stub's address.  */
2775 relocation = (htab->stub->output_section->vma
2776 + htab->stub->output_offset
2783 r = _bfd_final_link_relocate (howto,
2787 rel->r_offset, relocation, addend);
2789 if (r != bfd_reloc_ok)
2791 const char *msg = (const char *) 0;
2795 case bfd_reloc_overflow:
2796 if (!((*info->callbacks->reloc_overflow)
2797 (info, (h ? &h->root : NULL), sym_name, howto->name,
2798 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
2802 case bfd_reloc_undefined:
2803 if (!((*info->callbacks->undefined_symbol)
2804 (info, sym_name, input_bfd, input_section,
2805 rel->r_offset, TRUE)))
2809 case bfd_reloc_outofrange:
2810 msg = _("internal error: out of range error");
2813 case bfd_reloc_notsupported:
2814 msg = _("internal error: unsupported relocation error");
2817 case bfd_reloc_dangerous:
2818 msg = _("internal error: dangerous error");
2822 msg = _("internal error: unknown error");
2826 if (!((*info->callbacks->warning)
2827 (info, msg, sym_name, input_bfd, input_section,
/* When not emitting relocs generally, compact the reloc array down
   to just the PPU relocs so only those reach the output.  */
2836 && emit_these_relocs
2837 && !info->relocatable
2838 && !info->emitrelocations)
2840 Elf_Internal_Rela *wrel;
2841 Elf_Internal_Shdr *rel_hdr;
2843 wrel = rel = relocs;
2844 relend = relocs + input_section->reloc_count;
2845 for (; rel < relend; rel++)
2849 r_type = ELF32_R_TYPE (rel->r_info);
2850 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2853 input_section->reloc_count = wrel - relocs;
2854 /* Backflips for _bfd_elf_link_output_relocs. */
2855 rel_hdr = &elf_section_data (input_section)->rel_hdr;
2856 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
2863 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
/* Output-symbol hook: for a final overlay link, a defined symbol whose
   name starts with "_SPUEAR_" is rewritten to the address of its
   overlay stub in the stub section.  */
2866 spu_elf_output_symbol_hook (struct bfd_link_info *info,
2867 const char *sym_name ATTRIBUTE_UNUSED,
2868 Elf_Internal_Sym *sym,
2869 asection *sym_sec ATTRIBUTE_UNUSED,
2870 struct elf_link_hash_entry *h)
2872 struct spu_link_hash_table *htab = spu_hash_table (info);
2874 if (!info->relocatable
2875 && htab->num_overlays != 0
2877 && (h->root.type == bfd_link_hash_defined
2878 || h->root.type == bfd_link_hash_defweak)
2880 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
/* Stub names are keyed on (section, sym, reloc); a zeroed reloc
   stands in for "no addend".  */
2882 static Elf_Internal_Rela zero_rel;
2883 char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
2884 struct spu_stub_hash_entry *sh;
2886 if (stub_name == NULL)
2888 sh = (struct spu_stub_hash_entry *)
2889 bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
/* Point the output symbol at the stub.  */
2894 = _bfd_elf_section_from_bfd_section (htab->stub->output_section->owner,
2895 htab->stub->output_section);
2896 sym->st_value = (htab->stub->output_section->vma
2897 + htab->stub->output_offset
/* Nonzero when the output is an SPU "plugin"; consulted by
   spu_elf_post_process_headers when stamping the ELF header.  */
2904 static int spu_plugin = 0;
/* Record VAL as the plugin flag for this link.  NOTE(review): the
   function body is entirely in elided lines; presumably it just
   assigns VAL to spu_plugin -- confirm against the full source.  */
2907 spu_elf_plugin (int val)
2912 /* Set ELF header e_type for plugins. */
/* elf_backend_post_process_headers implementation: plugin outputs are
   marked ET_DYN in the ELF header.  NOTE(review): the guard that makes
   this conditional (presumably on the spu_plugin flag) is in elided
   lines -- confirm against the full source.  */
2915 spu_elf_post_process_headers (bfd *abfd,
2916 struct bfd_link_info *info ATTRIBUTE_UNUSED)
2920 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
/* Stamp the output as a shared object so the loader treats the plugin
   as position-relocatable.  */
2922 i_ehdrp->e_type = ET_DYN;
2926 /* We may add an extra PT_LOAD segment for .toe. We also need extra
2927 segments for overlays. */
/* elf_backend_additional_program_headers implementation: reserve one
   extra program header per overlay, plus one if a loadable .toe
   section exists.  NOTE(review): the increment for .toe and the
   return of EXTRA are in elided lines.  */
2930 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
2932 struct spu_link_hash_table *htab = spu_hash_table (info);
/* Start with one segment per overlay.  */
2933 int extra = htab->num_overlays;
/* .toe gets its own PT_LOAD only if it is actually loadable.  */
2939 sec = bfd_get_section_by_name (abfd, ".toe");
2940 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
2946 /* Remove .toe section from other PT_LOAD segments and put it in
2947 a segment of its own. Put overlays in separate segments too. */
/* elf_backend_modify_segment_map implementation.  Walks every multi-
   section PT_LOAD map entry; when .toe or an overlay section is found
   inside one, the segment is split so that section sits in a PT_LOAD
   of its own.  NOTE(review): heavily elided here -- the list-splicing
   statements that link the new elf_segment_map entries into the chain,
   the allocation-failure returns, and the final return are not
   visible.  */
2950 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
2953 struct elf_segment_map *m;
2959 toe = bfd_get_section_by_name (abfd, ".toe");
/* Scan each loadable segment that carries more than one section.  */
2960 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2961 if (m->p_type == PT_LOAD && m->count > 1)
2962 for (i = 0; i < m->count; i++)
/* Trigger a split on .toe itself or on any overlay section (nonzero
   ovl_index).  */
2963 if ((s = m->sections[i]) == toe
2964 || spu_elf_section_data (s)->ovl_index != 0)
2966 struct elf_segment_map *m2;
/* If sections follow S, move the tail into a fresh PT_LOAD map entry
   sized for count - (i + 1) sections (the map struct already holds
   room for one).  */
2969 if (i + 1 < m->count)
2971 amt = sizeof (struct elf_segment_map);
2972 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
2973 m2 = bfd_zalloc (abfd, amt);
2976 m2->count = m->count - (i + 1);
2977 memcpy (m2->sections, m->sections + i + 1,
2978 m2->count * sizeof (m->sections[0]));
2979 m2->p_type = PT_LOAD;
/* Give S itself a single-section PT_LOAD map entry.  */
2987 amt = sizeof (struct elf_segment_map);
2988 m2 = bfd_zalloc (abfd, amt);
2991 m2->p_type = PT_LOAD;
2993 m2->sections[0] = s;
3003 /* Check that all loadable section VMAs lie in the range
3004 LO .. HI inclusive. */
/* Returns the first PT_LOAD section that falls outside [LO, HI];
   presumably returns NULL when every section fits -- the return type
   line and final return are in elided lines, TODO confirm.  */
3007 spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
3009 struct elf_segment_map *m;
3012 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
3013 if (m->p_type == PT_LOAD)
3014 for (i = 0; i < m->count; i++)
/* A non-empty section is rejected when it starts below LO, starts
   above HI, or its last byte (vma + size - 1) would extend past HI.  */
3015 if (m->sections[i]->size != 0
3016 && (m->sections[i]->vma < lo
3017 || m->sections[i]->vma > hi
3018 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
3019 return m->sections[i];
3024 /* Tweak phdrs before writing them out. */
/* elf_backend_modify_program_headers implementation.  Two jobs:
   (1) mark each overlay's program header with PF_OVERLAY and record
   its file offset in the _ovly_table section contents, and
   (2) round PT_LOAD p_filesz/p_memsz up to multiples of 16 so SPU DMA
   transfers of whole segments are possible, backing off when rounding
   would overlap the next segment.  NOTE(review): heavily elided --
   the phdr-pointer setup, several loop guards, and the early/final
   returns are not visible in this listing.  */
3027 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
3029 const struct elf_backend_data *bed;
3030 struct elf_obj_tdata *tdata;
3031 Elf_Internal_Phdr *phdr, *last;
3032 struct spu_link_hash_table *htab;
3039 bed = get_elf_backend_data (abfd);
3040 tdata = elf_tdata (abfd);
/* Number of program headers actually allocated.  */
3042 count = tdata->program_header_size / bed->s->sizeof_phdr;
3043 htab = spu_hash_table (info);
3044 if (htab->num_overlays != 0)
3046 struct elf_segment_map *m;
/* Walk segment map and phdr array in lockstep; an overlay segment is
   recognised by its (single) section's nonzero ovl_index.  The
   PT_LOAD/count==1 test on this map entry is in an elided line.  */
3049 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
3051 && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
3053 /* Mark this as an overlay header. */
3054 phdr[i].p_flags |= PF_OVERLAY;
3056 if (htab->ovtab != NULL && htab->ovtab->size != 0)
3058 bfd_byte *p = htab->ovtab->contents;
/* Each _ovly_table entry is 16 bytes; the file offset field lives at
   byte 8 of the (o - 1)'th entry.  */
3059 unsigned int off = (o - 1) * 16 + 8;
3061 /* Write file_off into _ovly_table. */
3062 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
3067 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
3068 of 16. This should always be possible when using the standard
3069 linker scripts, but don't create overlapping segments if
3070 someone is playing games with linker scripts. */
/* First pass (last-to-first): verify each rounded segment would not
   run into the following PT_LOAD, tracking the next-higher segment in
   LAST.  */
3072 for (i = count; i-- != 0; )
3073 if (phdr[i].p_type == PT_LOAD)
/* -x & 15 is the amount needed to round x up to a multiple of 16.  */
3077 adjust = -phdr[i].p_filesz & 15;
3080 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
3083 adjust = -phdr[i].p_memsz & 15;
3086 && phdr[i].p_filesz != 0
3087 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
3088 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr
3091 if (phdr[i].p_filesz != 0)
/* Second pass: only if the first pass ran off the start of the array
   (no conflict found) do the actual rounding.  */
3095 if (i == (unsigned int) -1)
3096 for (i = count; i-- != 0; )
3097 if (phdr[i].p_type == PT_LOAD)
3101 adjust = -phdr[i].p_filesz & 15;
3102 phdr[i].p_filesz += adjust;
3104 adjust = -phdr[i].p_memsz & 15;
3105 phdr[i].p_memsz += adjust;
/* Target vector configuration for big-endian 32-bit SPU ELF.  These
   macros are consumed by elf32-target.h (included below) to build the
   bfd_elf32_spu_vec target.  */
3111 #define TARGET_BIG_SYM bfd_elf32_spu_vec
3112 #define TARGET_BIG_NAME "elf32-spu"
3113 #define ELF_ARCH bfd_arch_spu
3114 #define ELF_MACHINE_CODE EM_SPU
3115 /* This matches the alignment need for DMA. */
3116 #define ELF_MAXPAGESIZE 0x80
3117 #define elf_backend_rela_normal 1
3118 #define elf_backend_can_gc_sections 1
/* Hook the SPU-specific implementations into the generic ELF
   backend.  */
3120 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
3121 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
3122 #define elf_info_to_howto spu_elf_info_to_howto
3123 #define elf_backend_count_relocs spu_elf_count_relocs
3124 #define elf_backend_relocate_section spu_elf_relocate_section
3125 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
3126 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
3127 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
3128 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
3129 #define bfd_elf32_bfd_link_hash_table_free spu_elf_link_hash_table_free
3131 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
3132 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
3133 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
3134 #define elf_backend_post_process_headers spu_elf_post_process_headers
3135 #define elf_backend_special_sections spu_elf_special_sections
3136 #define bfd_elf32_bfd_final_link spu_elf_final_link
/* Expands the above into the actual target vector definition.  */
3138 #include "elf32-target.h"