/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"
/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
					   void *, asection *, bfd *,
					   char **);
/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */
static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
};
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    }
}
static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
		       arelent *cache_ptr,
		       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}
static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}
static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
	&& strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
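  /* val is now a word offset.  Since bfd_vma is unsigned, the next test
     rejects both val > 255 and val < -256 in one comparison, i.e.
     anything outside a signed 9-bit field.  */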
  if (val + 256 >= 512)
    return bfd_reloc_overflow;
  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
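  /* E.g. for val = 0x1e3 (word offset -29) the low seven bits 0x63 stay
     in place, while bits 7-8 (0x180) are replicated at bits 14-15 (the
     REL9I field) and at bits 23-24 (the REL9 field); dst_mask below
     keeps only the copy that belongs to this howto.  */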
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
	return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}
/* Set up overlay info for executables.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	  {
	    unsigned int j;

	    ++num_ovl;
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      ++num_buf;
	    last_phdr = phdr;
	    for (j = 1; j < elf_numsections (abfd); j++)
	      {
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
		  {
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
		  }
	      }
	  }
    }
  return TRUE;
}
/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}
/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;
  /* Local store --auto-overlay should reserve for non-overlay
     functions and data.  */
  unsigned int overlay_fixed;
  /* Local store --auto-overlay should reserve for stack and heap.  */
  unsigned int reserved;
  /* If reserved is not specified, stack analysis will calculate a value
     for the stack.  This parameter adjusts that value to allow for
     negative sp access (the ABI says 2000 bytes below sp are valid,
     and the overlay manager uses some of this area).  */
  int extra_stack_space;
  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Set on error.  */
  unsigned int stub_err : 1;
};

/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  bfd_vma stub_addr;
};

#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};

struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};

static struct function_info *find_function (asection *, bfd_vma,
					    struct bfd_link_info *);
/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
				      _bfd_elf_link_hash_newfunc,
				      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  memset (&htab->ovtab, 0,
	  sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}
void
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
{
  bfd_vma max_branch_log2;

  struct spu_link_hash_table *htab = spu_hash_table (info);
  htab->params = params;
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);

  /* For the software i-cache, we provide a "from" list whose size
     is a power-of-two number of quadwords, big enough to hold one
     byte per outgoing branch.  Compute this number here.  */
  max_branch_log2 = bfd_log2 (htab->params->max_branch);
  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
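  /* For example, with params->max_branch == 32, max_branch_log2 is 5
     and fromelem_size_log2 is 1: two quadwords per cache line, one
     byte for each of up to 32 outgoing branches.  */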
}

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static int
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return 0;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return 1;
}
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;
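      /* This sizes the standard ELF note layout: a 12-byte header of
	 namesz, descsz and type words, then the name (SPU_PLUGIN_NAME)
	 padded to a multiple of 4 bytes, then the descriptor (the
	 output filename), likewise padded.  */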
      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}
/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}
/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */

int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
	&& s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;
      bfd_vma lma_start = 0;

      for (i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];
	      vma_start = s0->vma;
	      lma_start = s0->lma;
	      if (strncmp (s0->name, ".ovl.init", 9) != 0)
		ovl_end = (s0->vma
			   + ((bfd_vma) 1
			      << (htab->num_lines_log2 + htab->line_size_log2)));
	      --i;
	      break;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma >= ovl_end)
	    break;

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	    {
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      if (((s->vma - vma_start) & (htab->params->line_size - 1))
		  || ((s->lma - lma_start) & (htab->params->line_size - 1)))
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "does not start on a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}
	      else if (s->size > htab->params->line_size)
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "is larger than a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}

	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= ((s->lma - lma_start) >> htab->line_size_log2) + 1;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
	    }
	}

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      info->callbacks->einfo (_("%X%P: overlay section %A "
					"is not in cache area.\n"),
				      alloc_sec[i - 1]);
	      bfd_set_error (bfd_error_bad_value);
	      return 0;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];

	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		{
		  ++num_buf;
		  if (strncmp (s0->name, ".ovl.init", 9) != 0)
		    {
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    }
		  else
		    ovl_end = s->vma + s->size;
		}
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		{
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  if (s0->vma != s->vma)
		    {
		      info->callbacks->einfo (_("%X%P: overlay sections %A "
						"and %A do not start at the "
						"same address.\n"),
					      s0, s);
		      bfd_set_error (bfd_error_bad_value);
		      return 0;
		    }
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
		}
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h == NULL)
	return 0;

      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_undefined;
	  h->ref_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->non_elf = 0;
	}
      htab->ovly_entry[i] = h;
    }

  return 2;
}
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}
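/* The masks above test the 9-bit major opcode: e.g. br is opcode byte
   0x32 with the ninth bit clear, so (0x32 & 0xec) == 0x20 accepts it,
   while the second-byte test rejects fsmbi (0x32 with the ninth bit
   set).  Indirect branches like bi (0x35) fail the 0xec mask and are
   handled by is_indirect_branch below.  */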
/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra  0001000..
   hbrr  0001001..  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}
/* True if INPUT_SECTION might need overlay stubs.  */

static bfd_boolean
maybe_needs_stubs (asection *input_section)
{
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)
    return FALSE;

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == bfd_abs_section_ptr)
    return FALSE;

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)
    return FALSE;

  return TRUE;
}

enum _stub_type
{
  no_stub,
  call_ovl_stub,
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,
  stub_error
};
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		const unsigned char *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  unsigned char insn[4];

  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 insn,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);
	    }
	}
    }

  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
	{
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)
	    return FALSE;
	}
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return TRUE;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)
	  break;

      if (g == NULL)
	{
	  /* Need a new non-overlay area stub.  Zap other stubs.  */
	  for (g = *head; g != NULL; g = gnext)
	    {
	      gnext = g->next;
	      if (g->addend == addend)
		{
		  htab->stub_count[g->ovl] -= 1;
		  free (g);
		}
	    }
	}
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}
/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.
   Soft-icache stubs are four or eight words.  */

static unsigned int
ovl_stub_size (struct spu_elf_params *params)
{
  return 16 << params->ovly_flavour >> params->compact_stub;
}

static unsigned int
ovl_stub_size_log2 (struct spu_elf_params *params)
{
  return 4 + params->ovly_flavour - params->compact_stub;
}
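/* Assuming ovly_normal == 0 and ovly_soft_icache == 1, these evaluate
   to: 16 bytes (four insns) for a normal stub, 8 bytes (two insns) for
   a compact one, and 32 or 16 bytes for soft-icache stubs.  */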
/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load

   Software icache stubs are:

   .word target_index
   .word target_ia;
   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler
   .quad xor_pattern
*/
static bfd_boolean
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	abort ();

      if (g->ovl == 0 && ovl != 0)
	return TRUE;

      if (g->stub_addr != (bfd_vma) -1)
	return TRUE;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
    {
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
    {
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    {
	      /* Between lr save and stack adjust.  */
	      lrlive = 3;
	      /* This should never happen since prologues won't
		 be split here.  */
	      BFD_ASSERT (0);
	    }
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      if (ovl == 0)
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);
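      /* The manager can then redirect the branch by xoring it with
	 (patt << 5) & 0x007fff80, which flips exactly the offset-field
	 bits that differ between targeting the stub and targeting the
	 real destination.  */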
      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len);
      if (name == NULL)
	return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
	  || htab->params->non_overlay_stubs))
    {
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
    }

  return TRUE;
}

static bfd_boolean
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
	  || htab->params->non_overlay_stubs))
    {
      return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
			 h->root.u.def.value, sym_sec);
    }

  return TRUE;
}
/* Size or build stubs.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (local_syms != NULL
		      && (symtab_hdr->contents
			  != (unsigned char *) local_syms))
		    free (local_syms);
		  return FALSE;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return TRUE;
}
/* Allocate space for overlay call and return stubs.
   Return 0 on error, 1 if no stubs, 2 otherwise.  */

int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  if (!process_stubs (info, FALSE))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  if (htab->stub_count == NULL)
    return 1;

  ibfd = info->input_bfds;
  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
  htab->stub_sec = bfd_zmalloc (amt);
  if (htab->stub_sec == NULL)
    return 0;

  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	   | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  htab->stub_sec[0] = stub;
  if (stub == NULL
      || !bfd_set_section_alignment (ibfd, stub,
				     ovl_stub_size_log2 (htab->params)))
    return 0;
  stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
  if (htab->params->ovly_flavour == ovly_soft_icache)
    /* Extra space for linked list entries.  */
    stub->size += htab->stub_count[0] * 16;

  for (i = 0; i < htab->num_overlays; ++i)
    {
      asection *osec = htab->ovl_sec[i];
      unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[ovl] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (ibfd, stub,
					 ovl_stub_size_log2 (htab->params)))
	return 0;
      stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
			  << htab->num_lines_log2;
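      /* As a worked example (these parameters are configurable): with
	 32 cache lines (num_lines_log2 == 5) and a single-quadword
	 "from" list (fromelem_size_log2 == 0), this is
	 (16 + 16 + 16) << 5 == 1536 bytes.  */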
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
	  || !bfd_set_section_alignment (ibfd, htab->init, 4))
	return 0;

      htab->init->size = 16;
    }
  else
    {
      /* htab->ovtab consists of two arrays.
	 .	struct {
	 .	  u32 vma;
	 .	  u32 size;
	 .	  u32 file_off;
	 .	  u32 buf;
	 .	} _ovly_table[];
	 .
	 .	struct {
	 .	  u32 mapped;
	 .	} _ovly_buf_table[];
	 .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
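      /* E.g. 3 overlays in 2 buffers take 3*16 + 16 + 2*4 == 72 bytes:
	 one 16-byte _ovly_table entry per overlay plus a 4-byte
	 _ovly_buf_table entry per buffer.  The extra 16 is the slot
	 before _ovly_table, which is defined at offset 16 below so
	 that overlay 1's entry is _ovly_table[0].  */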
    }

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
/* Called from ld to place overlay manager data sections.  This is done
   after the overlay manager itself is loaded, mainly so that the
   linker's htab->init section is placed after any other .ovl.init
   sections.  */

void
spu_elf_place_overlay_data (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int i;
  const char *ovout;

  if (htab->stub_count == NULL)
    return;

  (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");

  for (i = 0; i < htab->num_overlays; ++i)
    {
      asection *osec = htab->ovl_sec[i];
      unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
      (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");

  ovout = ".data";
  if (htab->params->ovly_flavour == ovly_soft_icache)
    ovout = ".bss";
  (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);

  (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
}
/* Functions to handle embedded spu_ovl.o object.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}

static file_ptr
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
	       void *stream,
	       void *buf,
	       file_ptr nbytes,
	       file_ptr offset)
{
  struct _ovl_stream *os;
  size_t count;
  size_t max;

  os = (struct _ovl_stream *) stream;
  max = (const char *) os->end - (const char *) os->start;

  if ((ufile_ptr) offset >= max)
    return 0;

  count = nbytes;
  if (count > max - offset)
    count = max - offset;

  memcpy (buf, (const char *) os->start + offset, count);
  return count;
}

bfd_boolean
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
			      "elf32-spu",
			      ovl_mgr_open,
			      (void *) stream,
			      ovl_mgr_pread,
			      NULL,
			      NULL);
  return *ovl_bfd != NULL;
}
static unsigned int
overlay_index (asection *sec)
{
  if (sec == NULL
      || sec->output_section == bfd_abs_section_ptr)
    return 0;
  return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
}
/* Define an STT_OBJECT symbol.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else if (h->root.u.def.section->owner != NULL)
    {
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
			     h->root.u.def.section->owner,
			     h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }
  else
    {
      (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
			     h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}
/* Fill in all stubs and the overlay tables.  */

bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  if (htab->stub_count == NULL)
    return TRUE;

  for (i = 0; i <= htab->num_overlays; i++)
    if (htab->stub_sec[i]->size != 0)
      {
	htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
						  htab->stub_sec[i]->size);
	if (htab->stub_sec[i]->contents == NULL)
	  return FALSE;
	htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
	htab->stub_sec[i]->size = 0;
      }

  for (i = 0; i < 2; i++)
    {
      h = htab->ovly_entry[i];
      BFD_ASSERT (h != NULL);

      if ((h->root.type == bfd_link_hash_defined
	   || h->root.type == bfd_link_hash_defweak)
	  && h->def_regular)
	{
	  s = h->root.u.def.section->output_section;
	  if (spu_elf_section_data (s)->u.o.ovl_index)
	    {
	      (*_bfd_error_handler) (_("%s in overlay section"),
				     h->root.root.string);
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }
	}
      else
	BFD_ASSERT (0);
    }

  /* Fill in all the stubs.  */
  process_stubs (info, TRUE);
  if (!htab->stub_err)
    elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

  if (htab->stub_err)
    {
      (*_bfd_error_handler) (_("overlay stub relocation overflow"));
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  for (i = 0; i <= htab->num_overlays; i++)
    {
      if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
	{
	  (*_bfd_error_handler) (_("stubs don't match calculated size"));
	  bfd_set_error (bfd_error_bad_value);
	  return FALSE;
	}
      htab->stub_sec[i]->rawsize = 0;
    }

  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return TRUE;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      bfd_vma off;

      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
				   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
	{
	  htab->init->contents = bfd_zalloc (htab->init->owner,
					     htab->init->size);
	  if (htab->init->contents == NULL)
	    return FALSE;

	  h = define_ovtab_symbol (htab, "__icache_fileoff");
	  if (h == NULL)
	    return FALSE;
	  h->root.u.def.value = 0;
	  h->root.u.def.section = htab->init;
	  h->size = 8;
	}
    }
  else
    {
      /* Write out _ovly_table.  */
      /* Set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
	{
	  unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

	  if (ovl_index != 0)
	    {
	      unsigned long off = ovl_index * 16;
	      unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

	      bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
	      bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
			  p + off + 4);
	      /* file_off written later in spu_elf_modify_program_headers.  */
	      bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
	    }
	}

      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = 0;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
      h->size = 0;
    }

  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}
/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */

asection *
spu_elf_check_vma (struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  unsigned int i;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *abfd = info->output_bfd;
  bfd_vma hi = htab->params->local_store_hi;
  bfd_vma lo = htab->params->local_store_lo;

  htab->local_store = hi + 1 - lo;

  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0
	    && (m->sections[i]->vma < lo
		|| m->sections[i]->vma > hi
		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
	  return m->sections[i];

  return NULL;
}
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.
   If a store of lr is found save the instruction offset to *LR_STORE.
   If a stack adjusting instruction is found, save that offset to
   *SP_ADJUST.  */

static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;
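	  /* E.g. "ai $sp,$sp,-48" carries a 10-bit immediate; after the
	     shift and sign extension above, imm is -48 and reg[1] tracks
	     the running stack pointer value.  */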
	  if (rt == 1 /* sp */)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1 /* sp */)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[rb] - reg[ra];
	  if (rt == 1 /* sp */)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
		     | ((imm & 0x4000) ? 0x00ff0000 : 0)
		     | ((imm & 0x2000) ? 0x0000ff00 : 0)
		     | ((imm & 0x1000) ? 0x000000ff : 0));
	  continue;
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	  continue;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  return 0;
}
/* qsort predicate to sort symbols by section and value.  */

static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;

static int
sort_syms (const void *a, const void *b)
{
  Elf_Internal_Sym *const *s1 = a;
  Elf_Internal_Sym *const *s2 = b;
  asection *sec1, *sec2;
  bfd_signed_vma delta;

  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];

  if (sec1 != sec2)
    return sec1->index - sec2->index;

  delta = (*s1)->st_value - (*s2)->st_value;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  delta = (*s2)->st_size - (*s1)->st_size;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return *s1 < *s2 ? -1 : 1;
}
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
   entries for section SEC.  */

static struct spu_elf_stack_info *
alloc_stack_info (asection *sec, int max_fun)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  bfd_size_type amt;

  amt = sizeof (struct spu_elf_stack_info);
  amt += (max_fun - 1) * sizeof (struct function_info);
  sec_data->u.i.stack_info = bfd_zmalloc (amt);
  if (sec_data->u.i.stack_info != NULL)
    sec_data->u.i.stack_info->max_fun = max_fun;
  return sec_data->u.i.stack_info;
}
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.  */

static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
/* Return the name of FUN.  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
/* Read the instruction at OFF in SEC.  Return true iff the instruction
   is a nop, lnop, or stop 0 (all zero insn).  */

static bfd_boolean
is_nop (asection *sec, bfd_vma off)
{
  unsigned char insn[4];

  if (off + 4 > sec->size
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
    return FALSE;
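  /* The first test matches both nop (0x40200000) and lnop (0x00200000):
     masking the opcode byte with 0xbf folds the two to zero, while the
     second byte must look like 0x20 in either case.  */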
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
    return TRUE;
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
    return TRUE;
  return FALSE;
}

/* Extend the range of FUN to cover nop padding up to LIMIT.
   Return TRUE iff some instruction other than a NOP was found.  */

static bfd_boolean
insns_at_end (struct function_info *fun, bfd_vma limit)
{
  bfd_vma off = (fun->hi + 3) & -4;

  while (off < limit && is_nop (fun->sec, off))
    off += 4;
  if (off < limit)
    {
      fun->hi = off;
      return TRUE;
    }
  fun->hi = limit;
  return FALSE;
}
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  if (sinfo == NULL)
    return FALSE;

  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
2544 /* Search current function info for a function that contains address
2545 OFFSET in section SEC. */
2547 static struct function_info *
2548 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2550 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2551 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2555 hi = sinfo->num_fun;
2558 mid = (lo + hi) / 2;
2559 if (offset < sinfo->fun[mid].lo)
2561 else if (offset >= sinfo->fun[mid].hi)
2564 return &sinfo->fun[mid];
2566 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2568 bfd_set_error (bfd_error_bad_value);
2572 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2573 if CALLEE was new. If this function return FALSE, CALLEE should
2577 insert_callee (struct function_info *caller, struct call_info *callee)
2579 struct call_info **pp, *p;
2581 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2582 if (p->fun == callee->fun)
2584 /* Tail calls use less stack than normal calls. Retain entry
2585 for normal call over one for tail call. */
2586 p->is_tail &= callee->is_tail;
2589 p->fun->start = NULL;
2590 p->fun->is_func = TRUE;
2592 p->count += callee->count;
2593 /* Reorder list so most recent call is first. */
2595 p->next = caller->call_list;
2596 caller->call_list = p;
2599 callee->next = caller->call_list;
2600 caller->call_list = callee;
2604 /* Copy CALL and insert the copy into CALLER. */
2607 copy_callee (struct function_info *caller, const struct call_info *call)
2609 struct call_info *callee;
2610 callee = bfd_malloc (sizeof (*callee));
2614 if (!insert_callee (caller, callee))
2619 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2620 overlay stub sections. */
2623 interesting_section (asection *s)
2625 return (s->output_section != bfd_abs_section_ptr
2626 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2627 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2631 /* Rummage through the relocs for SEC, looking for function calls.
2632 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2633 mark destination symbols on calls as being functions. Also
2634 look at branches, which may be tail calls or go to hot/cold
2635 section part of same function. */
2638 mark_functions_via_relocs (asection *sec,
2639 struct bfd_link_info *info,
2642 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2643 Elf_Internal_Shdr *symtab_hdr;
2645 unsigned int priority = 0;
2646 static bfd_boolean warned;
2648 if (!interesting_section (sec)
2649 || sec->reloc_count == 0)
2652 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2654 if (internal_relocs == NULL)
2657 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2658 psyms = &symtab_hdr->contents;
2659 irela = internal_relocs;
2660 irelaend = irela + sec->reloc_count;
2661 for (; irela < irelaend; irela++)
2663 enum elf_spu_reloc_type r_type;
2664 unsigned int r_indx;
2666 Elf_Internal_Sym *sym;
2667 struct elf_link_hash_entry *h;
2669 bfd_boolean reject, is_call;
2670 struct function_info *caller;
2671 struct call_info *callee;
2674 r_type = ELF32_R_TYPE (irela->r_info);
2675 if (r_type != R_SPU_REL16
2676 && r_type != R_SPU_ADDR16)
2679 if (!(call_tree && spu_hash_table (info)->params->auto_overlay))
2683 r_indx = ELF32_R_SYM (irela->r_info);
2684 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2688 || sym_sec->output_section == bfd_abs_section_ptr)
2694 unsigned char insn[4];
2696 if (!bfd_get_section_contents (sec->owner, sec, insn,
2697 irela->r_offset, 4))
2699 if (is_branch (insn))
2701 is_call = (insn[0] & 0xfd) == 0x31;
2702 priority = insn[1] & 0x0f;
2704 priority |= insn[2];
2706 priority |= insn[3];
2708 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2709 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2712 info->callbacks->einfo
2713 (_("%B(%A+0x%v): call to non-code section"
2714 " %B(%A), analysis incomplete\n"),
2715 sec->owner, sec, irela->r_offset,
2716 sym_sec->owner, sym_sec);
2724 if (!(call_tree && spu_hash_table (info)->params->auto_overlay)
2732 /* For --auto-overlay, count possible stubs we need for
2733 function pointer references. */
2734 unsigned int sym_type;
2738 sym_type = ELF_ST_TYPE (sym->st_info);
2739 if (sym_type == STT_FUNC)
2740 spu_hash_table (info)->non_ovly_stub += 1;
2745 val = h->root.u.def.value;
2747 val = sym->st_value;
2748 val += irela->r_addend;
2752 struct function_info *fun;
2754 if (irela->r_addend != 0)
2756 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2759 fake->st_value = val;
2761 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2765 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2767 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2770 if (irela->r_addend != 0
2771 && fun->u.sym != sym)
2776 caller = find_function (sec, irela->r_offset, info);
2779 callee = bfd_malloc (sizeof *callee);
2783 callee->fun = find_function (sym_sec, val, info);
2784 if (callee->fun == NULL)
2786 callee->is_tail = !is_call;
2787 callee->is_pasted = FALSE;
2788 callee->priority = priority;
2790 if (callee->fun->last_caller != sec)
2792 callee->fun->last_caller = sec;
2793 callee->fun->call_count += 1;
2795 if (!insert_callee (caller, callee))
2798 && !callee->fun->is_func
2799 && callee->fun->stack == 0)
2801 /* This is either a tail call or a branch from one part of
2802 the function to another, ie. hot/cold section. If the
2803 destination has been called by some other function then
2804 it is a separate function. We also assume that functions
2805 are not split across input files. */
2806 if (sec->owner != sym_sec->owner)
2808 callee->fun->start = NULL;
2809 callee->fun->is_func = TRUE;
2811 else if (callee->fun->start == NULL)
2813 struct function_info *caller_start = caller;
2814 while (caller_start->start)
2815 caller_start = caller_start->start;
2817 if (caller_start != callee->fun)
2818 callee->fun->start = caller_start;
2822 struct function_info *callee_start;
2823 struct function_info *caller_start;
2824 callee_start = callee->fun;
2825 while (callee_start->start)
2826 callee_start = callee_start->start;
2827 caller_start = caller;
2828 while (caller_start->start)
2829 caller_start = caller_start->start;
2830 if (caller_start != callee_start)
2832 callee->fun->start = NULL;
2833 callee->fun->is_func = TRUE;
2842 /* Handle something like .init or .fini, which has a piece of a function.
2843 These sections are pasted together to form a single function. */
2846 pasted_function (asection *sec)
2848 struct bfd_link_order *l;
2849 struct _spu_elf_section_data *sec_data;
2850 struct spu_elf_stack_info *sinfo;
2851 Elf_Internal_Sym *fake;
2852 struct function_info *fun, *fun_start;
2854 fake = bfd_zmalloc (sizeof (*fake));
2858 fake->st_size = sec->size;
2860 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2861 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2865 /* Find a function immediately preceding this section. */
2867 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2869 if (l->u.indirect.section == sec)
2871 if (fun_start != NULL)
2873 struct call_info *callee = bfd_malloc (sizeof *callee);
2877 fun->start = fun_start;
2879 callee->is_tail = TRUE;
2880 callee->is_pasted = TRUE;
2882 if (!insert_callee (fun_start, callee))
2888 if (l->type == bfd_indirect_link_order
2889 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2890 && (sinfo = sec_data->u.i.stack_info) != NULL
2891 && sinfo->num_fun != 0)
2892 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2895 /* Don't return an error if we did not find a function preceding this
2896 section. The section may have incorrect flags. */
2900 /* Map address ranges in code sections to functions. */
2903 discover_functions (struct bfd_link_info *info)
2907 Elf_Internal_Sym ***psym_arr;
2908 asection ***sec_arr;
2909 bfd_boolean gaps = FALSE;
2912 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2915 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2916 if (psym_arr == NULL)
2918 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2919 if (sec_arr == NULL)
2922 for (ibfd = info->input_bfds, bfd_idx = 0;
2924 ibfd = ibfd->link_next, bfd_idx++)
2926 extern const bfd_target bfd_elf32_spu_vec;
2927 Elf_Internal_Shdr *symtab_hdr;
2930 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2931 asection **psecs, **p;
2933 if (ibfd->xvec != &bfd_elf32_spu_vec)
2936 /* Read all the symbols. */
2937 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2938 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2942 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2943 if (interesting_section (sec))
2951 if (symtab_hdr->contents != NULL)
2953 /* Don't use cached symbols since the generic ELF linker
2954 code only reads local symbols, and we need globals too. */
2955 free (symtab_hdr->contents);
2956 symtab_hdr->contents = NULL;
2958 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2960 symtab_hdr->contents = (void *) syms;
2964 /* Select defined function symbols that are going to be output. */
2965 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2968 psym_arr[bfd_idx] = psyms;
2969 psecs = bfd_malloc (symcount * sizeof (*psecs));
2972 sec_arr[bfd_idx] = psecs;
2973 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2974 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2975 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2979 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2980 if (s != NULL && interesting_section (s))
2983 symcount = psy - psyms;
2986 /* Sort them by section and offset within section. */
2987 sort_syms_syms = syms;
2988 sort_syms_psecs = psecs;
2989 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2991 /* Now inspect the function symbols. */
2992 for (psy = psyms; psy < psyms + symcount; )
2994 asection *s = psecs[*psy - syms];
2995 Elf_Internal_Sym **psy2;
2997 for (psy2 = psy; ++psy2 < psyms + symcount; )
2998 if (psecs[*psy2 - syms] != s)
3001 if (!alloc_stack_info (s, psy2 - psy))
3006 /* First install info about properly typed and sized functions.
3007 In an ideal world this will cover all code sections, except
3008 when partitioning functions into hot and cold sections,
3009 and the horrible pasted together .init and .fini functions. */
3010 for (psy = psyms; psy < psyms + symcount; ++psy)
3013 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3015 asection *s = psecs[sy - syms];
3016 if (!maybe_insert_function (s, sy, FALSE, TRUE))
3021 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3022 if (interesting_section (sec))
3023 gaps |= check_function_ranges (sec, info);
3028 /* See if we can discover more function symbols by looking at
3030 for (ibfd = info->input_bfds, bfd_idx = 0;
3032 ibfd = ibfd->link_next, bfd_idx++)
3036 if (psym_arr[bfd_idx] == NULL)
3039 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3040 if (!mark_functions_via_relocs (sec, info, FALSE))
3044 for (ibfd = info->input_bfds, bfd_idx = 0;
3046 ibfd = ibfd->link_next, bfd_idx++)
3048 Elf_Internal_Shdr *symtab_hdr;
3050 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3053 if ((psyms = psym_arr[bfd_idx]) == NULL)
3056 psecs = sec_arr[bfd_idx];
3058 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3059 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3062 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3063 if (interesting_section (sec))
3064 gaps |= check_function_ranges (sec, info);
3068 /* Finally, install all globals. */
3069 for (psy = psyms; (sy = *psy) != NULL; ++psy)
3073 s = psecs[sy - syms];
3075 /* Global syms might be improperly typed functions. */
3076 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3077 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3079 if (!maybe_insert_function (s, sy, FALSE, FALSE))
3085 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3087 extern const bfd_target bfd_elf32_spu_vec;
3090 if (ibfd->xvec != &bfd_elf32_spu_vec)
3093 /* Some of the symbols we've installed as marking the
3094 beginning of functions may have a size of zero. Extend
3095 the range of such functions to the beginning of the
3096 next symbol of interest. */
3097 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3098 if (interesting_section (sec))
3100 struct _spu_elf_section_data *sec_data;
3101 struct spu_elf_stack_info *sinfo;
3103 sec_data = spu_elf_section_data (sec);
3104 sinfo = sec_data->u.i.stack_info;
3105 if (sinfo != NULL && sinfo->num_fun != 0)
3108 bfd_vma hi = sec->size;
3110 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3112 sinfo->fun[fun_idx].hi = hi;
3113 hi = sinfo->fun[fun_idx].lo;
3116 sinfo->fun[0].lo = 0;
3118 /* No symbols in this section. Must be .init or .fini
3119 or something similar. */
3120 else if (!pasted_function (sec))
3126 for (ibfd = info->input_bfds, bfd_idx = 0;
3128 ibfd = ibfd->link_next, bfd_idx++)
3130 if (psym_arr[bfd_idx] == NULL)
3133 free (psym_arr[bfd_idx]);
3134 free (sec_arr[bfd_idx]);
3143 /* Iterate over all function_info we have collected, calling DOIT on
3144 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3148 for_each_node (bfd_boolean (*doit) (struct function_info *,
3149 struct bfd_link_info *,
3151 struct bfd_link_info *info,
3157 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3159 extern const bfd_target bfd_elf32_spu_vec;
3162 if (ibfd->xvec != &bfd_elf32_spu_vec)
3165 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3167 struct _spu_elf_section_data *sec_data;
3168 struct spu_elf_stack_info *sinfo;
3170 if ((sec_data = spu_elf_section_data (sec)) != NULL
3171 && (sinfo = sec_data->u.i.stack_info) != NULL)
3174 for (i = 0; i < sinfo->num_fun; ++i)
3175 if (!root_only || !sinfo->fun[i].non_root)
3176 if (!doit (&sinfo->fun[i], info, param))
3184 /* Transfer call info attached to struct function_info entries for
3185 all of a given function's sections to the first entry. */
3188 transfer_calls (struct function_info *fun,
3189 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3190 void *param ATTRIBUTE_UNUSED)
3192 struct function_info *start = fun->start;
3196 struct call_info *call, *call_next;
3198 while (start->start != NULL)
3199 start = start->start;
3200 for (call = fun->call_list; call != NULL; call = call_next)
3202 call_next = call->next;
3203 if (!insert_callee (start, call))
3206 fun->call_list = NULL;
3211 /* Mark nodes in the call graph that are called by some other node. */
3214 mark_non_root (struct function_info *fun,
3215 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3216 void *param ATTRIBUTE_UNUSED)
3218 struct call_info *call;
3223 for (call = fun->call_list; call; call = call->next)
3225 call->fun->non_root = TRUE;
3226 mark_non_root (call->fun, 0, 0);
3231 /* Remove cycles from the call graph. Set depth of nodes. */
3234 remove_cycles (struct function_info *fun,
3235 struct bfd_link_info *info,
3238 struct call_info **callp, *call;
3239 unsigned int depth = *(unsigned int *) param;
3240 unsigned int max_depth = depth;
3244 fun->marking = TRUE;
3246 callp = &fun->call_list;
3247 while ((call = *callp) != NULL)
3249 call->max_depth = depth + !call->is_pasted;
3250 if (!call->fun->visit2)
3252 if (!remove_cycles (call->fun, info, &call->max_depth))
3254 if (max_depth < call->max_depth)
3255 max_depth = call->max_depth;
3257 else if (call->fun->marking)
3259 struct spu_link_hash_table *htab = spu_hash_table (info);
3261 if (!htab->params->auto_overlay
3262 && htab->params->stack_analysis)
3264 const char *f1 = func_name (fun);
3265 const char *f2 = func_name (call->fun);
3267 info->callbacks->info (_("Stack analysis will ignore the call "
3272 call->broken_cycle = TRUE;
3274 callp = &call->next;
3276 fun->marking = FALSE;
3277 *(unsigned int *) param = max_depth;
3281 /* Check that we actually visited all nodes in remove_cycles. If we
3282 didn't, then there is some cycle in the call graph not attached to
3283 any root node. Arbitrarily choose a node in the cycle as a new
3284 root and break the cycle. */
3287 mark_detached_root (struct function_info *fun,
3288 struct bfd_link_info *info,
3293 fun->non_root = FALSE;
3294 *(unsigned int *) param = 0;
3295 return remove_cycles (fun, info, param);
3298 /* Populate call_list for each function. */
3301 build_call_tree (struct bfd_link_info *info)
3306 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3308 extern const bfd_target bfd_elf32_spu_vec;
3311 if (ibfd->xvec != &bfd_elf32_spu_vec)
3314 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3315 if (!mark_functions_via_relocs (sec, info, TRUE))
3319 /* Transfer call info from hot/cold section part of function
3321 if (!spu_hash_table (info)->params->auto_overlay
3322 && !for_each_node (transfer_calls, info, 0, FALSE))
3325 /* Find the call graph root(s). */
3326 if (!for_each_node (mark_non_root, info, 0, FALSE))
3329 /* Remove cycles from the call graph. We start from the root node(s)
3330 so that we break cycles in a reasonable place. */
3332 if (!for_each_node (remove_cycles, info, &depth, TRUE))
3335 return for_each_node (mark_detached_root, info, &depth, FALSE);
3338 /* qsort predicate to sort calls by priority, max_depth then count. */
3341 sort_calls (const void *a, const void *b)
3343 struct call_info *const *c1 = a;
3344 struct call_info *const *c2 = b;
3347 delta = (*c2)->priority - (*c1)->priority;
3351 delta = (*c2)->max_depth - (*c1)->max_depth;
3355 delta = (*c2)->count - (*c1)->count;
3359 return (char *) c1 - (char *) c2;
3363 unsigned int max_overlay_size;
3366 /* Set linker_mark and gc_mark on any sections that we will put in
3367 overlays. These flags are used by the generic ELF linker, but we
3368 won't be continuing on to bfd_elf_final_link so it is OK to use
3369 them. linker_mark is clear before we get here. Set segment_mark
3370 on sections that are part of a pasted function (excluding the last
3373 Set up function rodata section if --overlay-rodata. We don't
3374 currently include merged string constant rodata sections since
3376 Sort the call graph so that the deepest nodes will be visited
3380 mark_overlay_section (struct function_info *fun,
3381 struct bfd_link_info *info,
3384 struct call_info *call;
3386 struct _mos_param *mos_param = param;
3387 struct spu_link_hash_table *htab = spu_hash_table (info);
3393 if (!fun->sec->linker_mark
3394 && (htab->params->ovly_flavour != ovly_soft_icache
3395 || htab->params->non_ia_text
3396 || strncmp (fun->sec->name, ".text.ia.", 9) == 0
3397 || strcmp (fun->sec->name, ".init") == 0
3398 || strcmp (fun->sec->name, ".fini") == 0))
3402 fun->sec->linker_mark = 1;
3403 fun->sec->gc_mark = 1;
3404 fun->sec->segment_mark = 0;
3405 /* Ensure SEC_CODE is set on this text section (it ought to
3406 be!), and SEC_CODE is clear on rodata sections. We use
3407 this flag to differentiate the two overlay section types. */
3408 fun->sec->flags |= SEC_CODE;
3410 size = fun->sec->size;
3411 if (htab->params->auto_overlay & OVERLAY_RODATA)
3415 /* Find the rodata section corresponding to this function's
3417 if (strcmp (fun->sec->name, ".text") == 0)
3419 name = bfd_malloc (sizeof (".rodata"));
3422 memcpy (name, ".rodata", sizeof (".rodata"));
3424 else if (strncmp (fun->sec->name, ".text.", 6) == 0)
3426 size_t len = strlen (fun->sec->name);
3427 name = bfd_malloc (len + 3);
3430 memcpy (name, ".rodata", sizeof (".rodata"));
3431 memcpy (name + 7, fun->sec->name + 5, len - 4);
3433 else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
3435 size_t len = strlen (fun->sec->name) + 1;
3436 name = bfd_malloc (len);
3439 memcpy (name, fun->sec->name, len);
3445 asection *rodata = NULL;
3446 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3447 if (group_sec == NULL)
3448 rodata = bfd_get_section_by_name (fun->sec->owner, name);
3450 while (group_sec != NULL && group_sec != fun->sec)
3452 if (strcmp (group_sec->name, name) == 0)
3457 group_sec = elf_section_data (group_sec)->next_in_group;
3459 fun->rodata = rodata;
3462 size += fun->rodata->size;
3463 if (htab->params->line_size != 0
3464 && size > htab->params->line_size)
3466 size -= fun->rodata->size;
3471 fun->rodata->linker_mark = 1;
3472 fun->rodata->gc_mark = 1;
3473 fun->rodata->flags &= ~SEC_CODE;
3479 if (mos_param->max_overlay_size < size)
3480 mos_param->max_overlay_size = size;
3483 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3488 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3492 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3493 calls[count++] = call;
3495 qsort (calls, count, sizeof (*calls), sort_calls);
3497 fun->call_list = NULL;
3501 calls[count]->next = fun->call_list;
3502 fun->call_list = calls[count];
3507 for (call = fun->call_list; call != NULL; call = call->next)
3509 if (call->is_pasted)
3511 /* There can only be one is_pasted call per function_info. */
3512 BFD_ASSERT (!fun->sec->segment_mark);
3513 fun->sec->segment_mark = 1;
3515 if (!call->broken_cycle
3516 && !mark_overlay_section (call->fun, info, param))
3520 /* Don't put entry code into an overlay. The overlay manager needs
3521 a stack! Also, don't mark .ovl.init as an overlay. */
3522 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3523 == info->output_bfd->start_address
3524 || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
3526 fun->sec->linker_mark = 0;
3527 if (fun->rodata != NULL)
3528 fun->rodata->linker_mark = 0;
3533 /* If non-zero then unmark functions called from those within sections
3534 that we need to unmark. Unfortunately this isn't reliable since the
3535 call graph cannot know the destination of function pointer calls. */
3536 #define RECURSE_UNMARK 0
3539 asection *exclude_input_section;
3540 asection *exclude_output_section;
3541 unsigned long clearing;
3544 /* Undo some of mark_overlay_section's work. */
3547 unmark_overlay_section (struct function_info *fun,
3548 struct bfd_link_info *info,
3551 struct call_info *call;
3552 struct _uos_param *uos_param = param;
3553 unsigned int excluded = 0;
3561 if (fun->sec == uos_param->exclude_input_section
3562 || fun->sec->output_section == uos_param->exclude_output_section)
3566 uos_param->clearing += excluded;
3568 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3570 fun->sec->linker_mark = 0;
3572 fun->rodata->linker_mark = 0;
3575 for (call = fun->call_list; call != NULL; call = call->next)
3576 if (!call->broken_cycle
3577 && !unmark_overlay_section (call->fun, info, param))
3581 uos_param->clearing -= excluded;
3586 unsigned int lib_size;
3587 asection **lib_sections;
3590 /* Add sections we have marked as belonging to overlays to an array
3591 for consideration as non-overlay sections. The array consist of
3592 pairs of sections, (text,rodata), for functions in the call graph. */
3595 collect_lib_sections (struct function_info *fun,
3596 struct bfd_link_info *info,
3599 struct _cl_param *lib_param = param;
3600 struct call_info *call;
3607 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3610 size = fun->sec->size;
3612 size += fun->rodata->size;
3614 if (size <= lib_param->lib_size)
3616 *lib_param->lib_sections++ = fun->sec;
3617 fun->sec->gc_mark = 0;
3618 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3620 *lib_param->lib_sections++ = fun->rodata;
3621 fun->rodata->gc_mark = 0;
3624 *lib_param->lib_sections++ = NULL;
3627 for (call = fun->call_list; call != NULL; call = call->next)
3628 if (!call->broken_cycle)
3629 collect_lib_sections (call->fun, info, param);
3634 /* qsort predicate to sort sections by call count. */
3637 sort_lib (const void *a, const void *b)
3639 asection *const *s1 = a;
3640 asection *const *s2 = b;
3641 struct _spu_elf_section_data *sec_data;
3642 struct spu_elf_stack_info *sinfo;
3646 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3647 && (sinfo = sec_data->u.i.stack_info) != NULL)
3650 for (i = 0; i < sinfo->num_fun; ++i)
3651 delta -= sinfo->fun[i].call_count;
3654 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3655 && (sinfo = sec_data->u.i.stack_info) != NULL)
3658 for (i = 0; i < sinfo->num_fun; ++i)
3659 delta += sinfo->fun[i].call_count;
3668 /* Remove some sections from those marked to be in overlays. Choose
3669 those that are called from many places, likely library functions. */
3672 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3675 asection **lib_sections;
3676 unsigned int i, lib_count;
3677 struct _cl_param collect_lib_param;
3678 struct function_info dummy_caller;
3679 struct spu_link_hash_table *htab;
3681 memset (&dummy_caller, 0, sizeof (dummy_caller));
3683 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3685 extern const bfd_target bfd_elf32_spu_vec;
3688 if (ibfd->xvec != &bfd_elf32_spu_vec)
3691 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3692 if (sec->linker_mark
3693 && sec->size < lib_size
3694 && (sec->flags & SEC_CODE) != 0)
3697 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3698 if (lib_sections == NULL)
3699 return (unsigned int) -1;
3700 collect_lib_param.lib_size = lib_size;
3701 collect_lib_param.lib_sections = lib_sections;
3702 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3704 return (unsigned int) -1;
3705 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3707 /* Sort sections so that those with the most calls are first. */
3709 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3711 htab = spu_hash_table (info);
3712 for (i = 0; i < lib_count; i++)
3714 unsigned int tmp, stub_size;
3716 struct _spu_elf_section_data *sec_data;
3717 struct spu_elf_stack_info *sinfo;
3719 sec = lib_sections[2 * i];
3720 /* If this section is OK, its size must be less than lib_size. */
3722 /* If it has a rodata section, then add that too. */
3723 if (lib_sections[2 * i + 1])
3724 tmp += lib_sections[2 * i + 1]->size;
3725 /* Add any new overlay call stubs needed by the section. */
3728 && (sec_data = spu_elf_section_data (sec)) != NULL
3729 && (sinfo = sec_data->u.i.stack_info) != NULL)
3732 struct call_info *call;
3734 for (k = 0; k < sinfo->num_fun; ++k)
3735 for (call = sinfo->fun[k].call_list; call; call = call->next)
3736 if (call->fun->sec->linker_mark)
3738 struct call_info *p;
3739 for (p = dummy_caller.call_list; p; p = p->next)
3740 if (p->fun == call->fun)
3743 stub_size += ovl_stub_size (htab->params);
3746 if (tmp + stub_size < lib_size)
3748 struct call_info **pp, *p;
3750 /* This section fits. Mark it as non-overlay. */
3751 lib_sections[2 * i]->linker_mark = 0;
3752 if (lib_sections[2 * i + 1])
3753 lib_sections[2 * i + 1]->linker_mark = 0;
3754 lib_size -= tmp + stub_size;
3755 /* Call stubs to the section we just added are no longer
3757 pp = &dummy_caller.call_list;
3758 while ((p = *pp) != NULL)
3759 if (!p->fun->sec->linker_mark)
3761 lib_size += ovl_stub_size (htab->params);
3767 /* Add new call stubs to dummy_caller. */
3768 if ((sec_data = spu_elf_section_data (sec)) != NULL
3769 && (sinfo = sec_data->u.i.stack_info) != NULL)
3772 struct call_info *call;
3774 for (k = 0; k < sinfo->num_fun; ++k)
3775 for (call = sinfo->fun[k].call_list;
3778 if (call->fun->sec->linker_mark)
3780 struct call_info *callee;
3781 callee = bfd_malloc (sizeof (*callee));
3783 return (unsigned int) -1;
3785 if (!insert_callee (&dummy_caller, callee))
3791 while (dummy_caller.call_list != NULL)
3793 struct call_info *call = dummy_caller.call_list;
3794 dummy_caller.call_list = call->next;
3797 for (i = 0; i < 2 * lib_count; i++)
3798 if (lib_sections[i])
3799 lib_sections[i]->gc_mark = 1;
3800 free (lib_sections);
3804 /* Build an array of overlay sections. The deepest node's section is
3805 added first, then its parent node's section, then everything called
3806 from the parent section. The idea being to group sections to
3807 minimise calls between different overlays. */
3810 collect_overlays (struct function_info *fun,
3811 struct bfd_link_info *info,
3814 struct call_info *call;
3815 bfd_boolean added_fun;
3816 asection ***ovly_sections = param;
3822 for (call = fun->call_list; call != NULL; call = call->next)
3823 if (!call->is_pasted && !call->broken_cycle)
3825 if (!collect_overlays (call->fun, info, ovly_sections))
3831 if (fun->sec->linker_mark && fun->sec->gc_mark)
3833 fun->sec->gc_mark = 0;
3834 *(*ovly_sections)++ = fun->sec;
3835 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3837 fun->rodata->gc_mark = 0;
3838 *(*ovly_sections)++ = fun->rodata;
3841 *(*ovly_sections)++ = NULL;
3844 /* Pasted sections must stay with the first section. We don't
3845 put pasted sections in the array, just the first section.
3846 Mark subsequent sections as already considered. */
3847 if (fun->sec->segment_mark)
3849 struct function_info *call_fun = fun;
3852 for (call = call_fun->call_list; call != NULL; call = call->next)
3853 if (call->is_pasted)
3855 call_fun = call->fun;
3856 call_fun->sec->gc_mark = 0;
3857 if (call_fun->rodata)
3858 call_fun->rodata->gc_mark = 0;
3864 while (call_fun->sec->segment_mark);
3868 for (call = fun->call_list; call != NULL; call = call->next)
3869 if (!call->broken_cycle
3870 && !collect_overlays (call->fun, info, ovly_sections))
3875 struct _spu_elf_section_data *sec_data;
3876 struct spu_elf_stack_info *sinfo;
3878 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3879 && (sinfo = sec_data->u.i.stack_info) != NULL)
3882 for (i = 0; i < sinfo->num_fun; ++i)
3883 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3891 struct _sum_stack_param {
3893 size_t overall_stack;
3894 bfd_boolean emit_stack_syms;
3897 /* Descend the call graph for FUN, accumulating total stack required. */
3900 sum_stack (struct function_info *fun,
3901 struct bfd_link_info *info,
3904 struct call_info *call;
3905 struct function_info *max;
3906 size_t stack, cum_stack;
3908 bfd_boolean has_call;
3909 struct _sum_stack_param *sum_stack_param = param;
3910 struct spu_link_hash_table *htab;
3912 cum_stack = fun->stack;
3913 sum_stack_param->cum_stack = cum_stack;
3919 for (call = fun->call_list; call; call = call->next)
3921 if (call->broken_cycle)
3923 if (!call->is_pasted)
3925 if (!sum_stack (call->fun, info, sum_stack_param))
3927 stack = sum_stack_param->cum_stack;
3928 /* Include caller stack for normal calls, don't do so for
3929 tail calls. fun->stack here is local stack usage for
3931 if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3932 stack += fun->stack;
3933 if (cum_stack < stack)
3940 sum_stack_param->cum_stack = cum_stack;
3942 /* Now fun->stack holds cumulative stack. */
3943 fun->stack = cum_stack;
3947 && sum_stack_param->overall_stack < cum_stack)
3948 sum_stack_param->overall_stack = cum_stack;
3950 htab = spu_hash_table (info);
3951 if (htab->params->auto_overlay)
3954 f1 = func_name (fun);
3955 if (htab->params->stack_analysis)
3958 info->callbacks->info (_(" %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
3959 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
3960 f1, (bfd_vma) stack, (bfd_vma) cum_stack);
3964 info->callbacks->minfo (_(" calls:\n"));
3965 for (call = fun->call_list; call; call = call->next)
3966 if (!call->is_pasted && !call->broken_cycle)
3968 const char *f2 = func_name (call->fun);
3969 const char *ann1 = call->fun == max ? "*" : " ";
3970 const char *ann2 = call->is_tail ? "t" : " ";
3972 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
3977 if (sum_stack_param->emit_stack_syms)
3979 char *name = bfd_malloc (18 + strlen (f1));
3980 struct elf_link_hash_entry *h;
3985 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
3986 sprintf (name, "__stack_%s", f1);
3988 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
3990 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
3993 && (h->root.type == bfd_link_hash_new
3994 || h->root.type == bfd_link_hash_undefined
3995 || h->root.type == bfd_link_hash_undefweak))
3997 h->root.type = bfd_link_hash_defined;
3998 h->root.u.def.section = bfd_abs_section_ptr;
3999 h->root.u.def.value = cum_stack;
4004 h->ref_regular_nonweak = 1;
4005 h->forced_local = 1;
4013 /* SEC is part of a pasted function. Return the call_info for the
4014 next section of this function. */
4016 static struct call_info *
4017 find_pasted_call (asection *sec)
4019 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4020 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4021 struct call_info *call;
4024 for (k = 0; k < sinfo->num_fun; ++k)
4025 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4026 if (call->is_pasted)
4032 /* qsort predicate to sort bfds by file name. */
4035 sort_bfds (const void *a, const void *b)
4037 bfd *const *abfd1 = a;
4038 bfd *const *abfd2 = b;
4040 return strcmp ((*abfd1)->filename, (*abfd2)->filename);
4044 print_one_overlay_section (FILE *script,
4047 unsigned int ovlynum,
4048 unsigned int *ovly_map,
4049 asection **ovly_sections,
4050 struct bfd_link_info *info)
4054 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4056 asection *sec = ovly_sections[2 * j];
4058 if (fprintf (script, " %s%c%s (%s)\n",
4059 (sec->owner->my_archive != NULL
4060 ? sec->owner->my_archive->filename : ""),
4061 info->path_separator,
4062 sec->owner->filename,
4065 if (sec->segment_mark)
4067 struct call_info *call = find_pasted_call (sec);
4068 while (call != NULL)
4070 struct function_info *call_fun = call->fun;
4071 sec = call_fun->sec;
4072 if (fprintf (script, " %s%c%s (%s)\n",
4073 (sec->owner->my_archive != NULL
4074 ? sec->owner->my_archive->filename : ""),
4075 info->path_separator,
4076 sec->owner->filename,
4079 for (call = call_fun->call_list; call; call = call->next)
4080 if (call->is_pasted)
4086 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4088 asection *sec = ovly_sections[2 * j + 1];
4090 && fprintf (script, " %s%c%s (%s)\n",
4091 (sec->owner->my_archive != NULL
4092 ? sec->owner->my_archive->filename : ""),
4093 info->path_separator,
4094 sec->owner->filename,
4098 sec = ovly_sections[2 * j];
4099 if (sec->segment_mark)
4101 struct call_info *call = find_pasted_call (sec);
4102 while (call != NULL)
4104 struct function_info *call_fun = call->fun;
4105 sec = call_fun->rodata;
4107 && fprintf (script, " %s%c%s (%s)\n",
4108 (sec->owner->my_archive != NULL
4109 ? sec->owner->my_archive->filename : ""),
4110 info->path_separator,
4111 sec->owner->filename,
4114 for (call = call_fun->call_list; call; call = call->next)
4115 if (call->is_pasted)
4124 /* Handle --auto-overlay. */
4127 spu_elf_auto_overlay (struct bfd_link_info *info)
4131 struct elf_segment_map *m;
4132 unsigned int fixed_size, lo, hi;
4133 struct spu_link_hash_table *htab;
4134 unsigned int base, i, count, bfd_count;
4135 unsigned int region, ovlynum;
4136 asection **ovly_sections, **ovly_p;
4137 unsigned int *ovly_map;
4139 unsigned int total_overlay_size, overlay_size;
4140 const char *ovly_mgr_entry;
4141 struct elf_link_hash_entry *h;
4142 struct _mos_param mos_param;
4143 struct _uos_param uos_param;
4144 struct function_info dummy_caller;
4146 /* Find the extents of our loadable image. */
4147 lo = (unsigned int) -1;
4149 for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
4150 if (m->p_type == PT_LOAD)
4151 for (i = 0; i < m->count; i++)
4152 if (m->sections[i]->size != 0)
4154 if (m->sections[i]->vma < lo)
4155 lo = m->sections[i]->vma;
4156 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4157 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4159 fixed_size = hi + 1 - lo;
4161 if (!discover_functions (info))
4164 if (!build_call_tree (info))
4167 htab = spu_hash_table (info);
4168 if (htab->reserved == 0)
4170 struct _sum_stack_param sum_stack_param;
4172 sum_stack_param.emit_stack_syms = 0;
4173 sum_stack_param.overall_stack = 0;
4174 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4176 htab->reserved = sum_stack_param.overall_stack + htab->extra_stack_space;
4179 /* No need for overlays if everything already fits. */
4180 if (fixed_size + htab->reserved <= htab->local_store
4181 && htab->params->ovly_flavour != ovly_soft_icache)
4183 htab->params->auto_overlay = 0;
4187 uos_param.exclude_input_section = 0;
4188 uos_param.exclude_output_section
4189 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4191 ovly_mgr_entry = "__ovly_load";
4192 if (htab->params->ovly_flavour == ovly_soft_icache)
4193 ovly_mgr_entry = "__icache_br_handler";
4194 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4195 FALSE, FALSE, FALSE);
4197 && (h->root.type == bfd_link_hash_defined
4198 || h->root.type == bfd_link_hash_defweak)
4201 /* We have a user supplied overlay manager. */
4202 uos_param.exclude_input_section = h->root.u.def.section;
4206 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4207 builtin version to .text, and will adjust .text size. */
4208 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4211 /* Mark overlay sections, and find max overlay section size. */
4212 mos_param.max_overlay_size = 0;
4213 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4216 /* We can't put the overlay manager or interrupt routines in
4218 uos_param.clearing = 0;
4219 if ((uos_param.exclude_input_section
4220 || uos_param.exclude_output_section)
4221 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4225 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4227 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4228 if (bfd_arr == NULL)
4231 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4234 total_overlay_size = 0;
4235 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4237 extern const bfd_target bfd_elf32_spu_vec;
4239 unsigned int old_count;
4241 if (ibfd->xvec != &bfd_elf32_spu_vec)
4245 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4246 if (sec->linker_mark)
4248 if ((sec->flags & SEC_CODE) != 0)
4250 fixed_size -= sec->size;
4251 total_overlay_size += sec->size;
4253 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4254 && sec->output_section->owner == info->output_bfd
4255 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4256 fixed_size -= sec->size;
4257 if (count != old_count)
4258 bfd_arr[bfd_count++] = ibfd;
4261 /* Since the overlay link script selects sections by file name and
4262 section name, ensure that file names are unique. */
4265 bfd_boolean ok = TRUE;
4267 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4268 for (i = 1; i < bfd_count; ++i)
4269 if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4271 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4273 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4274 info->callbacks->einfo (_("%s duplicated in %s\n"),
4275 bfd_arr[i]->filename,
4276 bfd_arr[i]->my_archive->filename);
4278 info->callbacks->einfo (_("%s duplicated\n"),
4279 bfd_arr[i]->filename);
4285 info->callbacks->einfo (_("sorry, no support for duplicate "
4286 "object files in auto-overlay script\n"));
4287 bfd_set_error (bfd_error_bad_value);
4293 fixed_size += htab->reserved;
4294 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4295 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4297 if (htab->params->ovly_flavour == ovly_soft_icache)
4299 /* Stubs in the non-icache area are bigger. */
4300 fixed_size += htab->non_ovly_stub * 16;
4301 /* Space for icache manager tables.
4302 a) Tag array, one quadword per cache line.
4303 - word 0: ia address of present line, init to zero. */
4304 fixed_size += 16 << htab->num_lines_log2;
4305 /* b) Rewrite "to" list, one quadword per cache line. */
4306 fixed_size += 16 << htab->num_lines_log2;
4307 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4308 to a power-of-two number of full quadwords) per cache line. */
4309 fixed_size += 16 << (htab->fromelem_size_log2
4310 + htab->num_lines_log2);
4311 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4316 /* Guess number of overlays. Assuming overlay buffer is on
4317 average only half full should be conservative. */
4318 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4319 / (htab->local_store - fixed_size));
4320 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4321 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4325 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4326 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4327 "size of 0x%v exceeds local store\n"),
4328 (bfd_vma) fixed_size,
4329 (bfd_vma) mos_param.max_overlay_size);
4331 /* Now see if we should put some functions in the non-overlay area. */
4332 else if (fixed_size < htab->overlay_fixed)
4334 unsigned int max_fixed, lib_size;
4336 max_fixed = htab->local_store - mos_param.max_overlay_size;
4337 if (max_fixed > htab->overlay_fixed)
4338 max_fixed = htab->overlay_fixed;
4339 lib_size = max_fixed - fixed_size;
4340 lib_size = auto_ovl_lib_functions (info, lib_size);
4341 if (lib_size == (unsigned int) -1)
4343 fixed_size = max_fixed - lib_size;
4346 /* Build an array of sections, suitably sorted to place into
4348 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4349 if (ovly_sections == NULL)
4351 ovly_p = ovly_sections;
4352 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4354 count = (size_t) (ovly_p - ovly_sections) / 2;
4355 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4356 if (ovly_map == NULL)
4359 memset (&dummy_caller, 0, sizeof (dummy_caller));
4360 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4361 if (htab->params->line_size != 0)
4362 overlay_size = htab->params->line_size;
4365 while (base < count)
4367 unsigned int size = 0, rosize = 0, roalign = 0;
4369 for (i = base; i < count; i++)
4371 asection *sec, *rosec;
4372 unsigned int tmp, rotmp;
4373 unsigned int num_stubs;
4374 struct call_info *call, *pasty;
4375 struct _spu_elf_section_data *sec_data;
4376 struct spu_elf_stack_info *sinfo;
4379 /* See whether we can add this section to the current
4380 overlay without overflowing our overlay buffer. */
4381 sec = ovly_sections[2 * i];
4382 tmp = align_power (size, sec->alignment_power) + sec->size;
4384 rosec = ovly_sections[2 * i + 1];
4387 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4388 if (roalign < rosec->alignment_power)
4389 roalign = rosec->alignment_power;
4391 if (align_power (tmp, roalign) + rotmp > overlay_size)
4393 if (sec->segment_mark)
4395 /* Pasted sections must stay together, so add their
4397 struct call_info *pasty = find_pasted_call (sec);
4398 while (pasty != NULL)
4400 struct function_info *call_fun = pasty->fun;
4401 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4402 + call_fun->sec->size);
4403 if (call_fun->rodata)
4405 rotmp = (align_power (rotmp,
4406 call_fun->rodata->alignment_power)
4407 + call_fun->rodata->size);
4408 if (roalign < rosec->alignment_power)
4409 roalign = rosec->alignment_power;
4411 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4412 if (pasty->is_pasted)
4416 if (align_power (tmp, roalign) + rotmp > overlay_size)
4419 /* If we add this section, we might need new overlay call
4420 stubs. Add any overlay section calls to dummy_call. */
4422 sec_data = spu_elf_section_data (sec);
4423 sinfo = sec_data->u.i.stack_info;
4424 for (k = 0; k < sinfo->num_fun; ++k)
4425 for (call = sinfo->fun[k].call_list; call; call = call->next)
4426 if (call->is_pasted)
4428 BFD_ASSERT (pasty == NULL);
4431 else if (call->fun->sec->linker_mark)
4433 if (!copy_callee (&dummy_caller, call))
4436 while (pasty != NULL)
4438 struct function_info *call_fun = pasty->fun;
4440 for (call = call_fun->call_list; call; call = call->next)
4441 if (call->is_pasted)
4443 BFD_ASSERT (pasty == NULL);
4446 else if (!copy_callee (&dummy_caller, call))
4450 /* Calculate call stub size. */
4452 for (call = dummy_caller.call_list; call; call = call->next)
4455 unsigned int stub_delta = 1;
4457 if (htab->params->ovly_flavour == ovly_soft_icache)
4458 stub_delta = call->count;
4459 num_stubs += stub_delta;
4461 /* If the call is within this overlay, we won't need a
4463 for (k = base; k < i + 1; k++)
4464 if (call->fun->sec == ovly_sections[2 * k])
4466 num_stubs -= stub_delta;
4470 if (htab->params->ovly_flavour == ovly_soft_icache
4471 && num_stubs > htab->params->max_branch)
4473 if (align_power (tmp, roalign) + rotmp
4474 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4482 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4483 ovly_sections[2 * i]->owner,
4484 ovly_sections[2 * i],
4485 ovly_sections[2 * i + 1] ? " + rodata" : "");
4486 bfd_set_error (bfd_error_bad_value);
4490 while (dummy_caller.call_list != NULL)
4492 struct call_info *call = dummy_caller.call_list;
4493 dummy_caller.call_list = call->next;
4499 ovly_map[base++] = ovlynum;
4502 script = htab->params->spu_elf_open_overlay_script ();
4504 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4507 if (htab->params->ovly_flavour == ovly_soft_icache)
4509 if (fprintf (script,
4510 " .data.icache ALIGN (16) : { *(.ovtab) *(.data.icache) }\n"
4511 " . = ALIGN (%u);\n"
4512 " .ovl.init : { *(.ovl.init) }\n"
4513 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4514 htab->params->line_size) <= 0)
4519 while (base < count)
4521 unsigned int indx = ovlynum - 1;
4522 unsigned int vma, lma;
4524 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4525 lma = indx << htab->line_size_log2;
4527 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4528 ": AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16) + %u) {\n",
4529 ovlynum, vma, lma) <= 0)
4532 base = print_one_overlay_section (script, base, count, ovlynum,
4533 ovly_map, ovly_sections, info);
4534 if (base == (unsigned) -1)
4537 if (fprintf (script, " }\n") <= 0)
4543 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4544 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4549 if (fprintf (script,
4550 " . = ALIGN (16);\n"
4551 " .ovl.init : { *(.ovl.init) }\n"
4552 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4555 for (region = 1; region <= htab->params->num_lines; region++)
4559 while (base < count && ovly_map[base] < ovlynum)
4567 /* We need to set lma since we are overlaying .ovl.init. */
4568 if (fprintf (script,
4569 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4574 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4578 while (base < count)
4580 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4583 base = print_one_overlay_section (script, base, count, ovlynum,
4584 ovly_map, ovly_sections, info);
4585 if (base == (unsigned) -1)
4588 if (fprintf (script, " }\n") <= 0)
4591 ovlynum += htab->params->num_lines;
4592 while (base < count && ovly_map[base] < ovlynum)
4596 if (fprintf (script, " }\n") <= 0)
4603 free (ovly_sections);
4605 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4607 if (fclose (script) != 0)
4610 if (htab->params->auto_overlay & AUTO_RELINK)
4611 (*htab->params->spu_elf_relink) ();
4616 bfd_set_error (bfd_error_system_call);
4618 info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
4622 /* Provide an estimate of total stack required. */
4625 spu_elf_stack_analysis (struct bfd_link_info *info)
4627 struct spu_link_hash_table *htab;
4628 struct _sum_stack_param sum_stack_param;
4630 if (!discover_functions (info))
4633 if (!build_call_tree (info))
4636 htab = spu_hash_table (info);
4637 if (htab->params->stack_analysis)
4639 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4640 info->callbacks->minfo (_("\nStack size for functions. "
4641 "Annotations: '*' max stack, 't' tail call\n"));
4644 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4645 sum_stack_param.overall_stack = 0;
4646 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4649 if (htab->params->stack_analysis)
4650 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4651 (bfd_vma) sum_stack_param.overall_stack);
4655 /* Perform a final link. */
4658 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4660 struct spu_link_hash_table *htab = spu_hash_table (info);
4662 if (htab->params->auto_overlay)
4663 spu_elf_auto_overlay (info);
4665 if ((htab->params->stack_analysis
4666 || (htab->params->ovly_flavour == ovly_soft_icache
4667 && htab->params->lrlive_analysis))
4668 && !spu_elf_stack_analysis (info))
4669 info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4671 if (!spu_elf_build_stubs (info))
4672 info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4674 return bfd_elf_final_link (output_bfd, info);
4677 /* Called when not normally emitting relocs, ie. !info->relocatable
4678 and !info->emitrelocations. Returns a count of special relocs
4679 that need to be emitted. */
4682 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4684 Elf_Internal_Rela *relocs;
4685 unsigned int count = 0;
4687 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4691 Elf_Internal_Rela *rel;
4692 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4694 for (rel = relocs; rel < relend; rel++)
4696 int r_type = ELF32_R_TYPE (rel->r_info);
4697 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4701 if (elf_section_data (sec)->relocs != relocs)
4708 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4711 spu_elf_relocate_section (bfd *output_bfd,
4712 struct bfd_link_info *info,
4714 asection *input_section,
4716 Elf_Internal_Rela *relocs,
4717 Elf_Internal_Sym *local_syms,
4718 asection **local_sections)
4720 Elf_Internal_Shdr *symtab_hdr;
4721 struct elf_link_hash_entry **sym_hashes;
4722 Elf_Internal_Rela *rel, *relend;
4723 struct spu_link_hash_table *htab;
4726 bfd_boolean emit_these_relocs = FALSE;
4727 bfd_boolean is_ea_sym;
4729 unsigned int iovl = 0;
4731 htab = spu_hash_table (info);
4732 stubs = (htab->stub_sec != NULL
4733 && maybe_needs_stubs (input_section));
4734 iovl = overlay_index (input_section);
4735 ea = bfd_get_section_by_name (output_bfd, "._ea");
4736 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4737 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4740 relend = relocs + input_section->reloc_count;
4741 for (; rel < relend; rel++)
4744 reloc_howto_type *howto;
4745 unsigned int r_symndx;
4746 Elf_Internal_Sym *sym;
4748 struct elf_link_hash_entry *h;
4749 const char *sym_name;
4752 bfd_reloc_status_type r;
4753 bfd_boolean unresolved_reloc;
4755 enum _stub_type stub_type;
4757 r_symndx = ELF32_R_SYM (rel->r_info);
4758 r_type = ELF32_R_TYPE (rel->r_info);
4759 howto = elf_howto_table + r_type;
4760 unresolved_reloc = FALSE;
4765 if (r_symndx < symtab_hdr->sh_info)
4767 sym = local_syms + r_symndx;
4768 sec = local_sections[r_symndx];
4769 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4770 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4774 if (sym_hashes == NULL)
4777 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4779 while (h->root.type == bfd_link_hash_indirect
4780 || h->root.type == bfd_link_hash_warning)
4781 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4784 if (h->root.type == bfd_link_hash_defined
4785 || h->root.type == bfd_link_hash_defweak)
4787 sec = h->root.u.def.section;
4789 || sec->output_section == NULL)
4790 /* Set a flag that will be cleared later if we find a
4791 relocation value for this symbol. output_section
4792 is typically NULL for symbols satisfied by a shared
4794 unresolved_reloc = TRUE;
4796 relocation = (h->root.u.def.value
4797 + sec->output_section->vma
4798 + sec->output_offset);
4800 else if (h->root.type == bfd_link_hash_undefweak)
4802 else if (info->unresolved_syms_in_objects == RM_IGNORE
4803 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4805 else if (!info->relocatable
4806 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4809 err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4810 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4811 if (!info->callbacks->undefined_symbol (info,
4812 h->root.root.string,
4815 rel->r_offset, err))
4819 sym_name = h->root.root.string;
4822 if (sec != NULL && elf_discarded_section (sec))
4824 /* For relocs against symbols from removed linkonce sections,
4825 or sections discarded by a linker script, we just want the
4826 section contents zeroed. Avoid any special processing. */
4827 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
4833 if (info->relocatable)
4836 is_ea_sym = (ea != NULL
4838 && sec->output_section == ea);
4840 /* If this symbol is in an overlay area, we may need to relocate
4841 to the overlay stub. */
4842 addend = rel->r_addend;
4845 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4846 contents, info)) != no_stub)
4848 unsigned int ovl = 0;
4849 struct got_entry *g, **head;
4851 if (stub_type != nonovl_stub)
4855 head = &h->got.glist;
4857 head = elf_local_got_ents (input_bfd) + r_symndx;
4859 for (g = *head; g != NULL; g = g->next)
4860 if (htab->params->ovly_flavour == ovly_soft_icache
4862 && g->br_addr == (rel->r_offset
4863 + input_section->output_offset
4864 + input_section->output_section->vma))
4865 : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4870 relocation = g->stub_addr;
4875 /* For soft icache, encode the overlay index into addresses. */
4876 if (htab->params->ovly_flavour == ovly_soft_icache
4877 && (r_type == R_SPU_ADDR16_HI
4878 || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
4881 unsigned int ovl = overlay_index (sec);
4884 unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
4885 relocation += set_id << 18;
4890 if (unresolved_reloc)
4892 else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4896 /* ._ea is a special section that isn't allocated in SPU
4897 memory, but rather occupies space in PPU memory as
4898 part of an embedded ELF image. If this reloc is
4899 against a symbol defined in ._ea, then transform the
4900 reloc into an equivalent one without a symbol
4901 relative to the start of the ELF image. */
4902 rel->r_addend += (relocation
4904 + elf_section_data (ea)->this_hdr.sh_offset);
4905 rel->r_info = ELF32_R_INFO (0, r_type);
4907 emit_these_relocs = TRUE;
4911 unresolved_reloc = TRUE;
4913 if (unresolved_reloc)
4915 (*_bfd_error_handler)
4916 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4918 bfd_get_section_name (input_bfd, input_section),
4919 (long) rel->r_offset,
4925 r = _bfd_final_link_relocate (howto,
4929 rel->r_offset, relocation, addend);
4931 if (r != bfd_reloc_ok)
4933 const char *msg = (const char *) 0;
4937 case bfd_reloc_overflow:
4938 if (!((*info->callbacks->reloc_overflow)
4939 (info, (h ? &h->root : NULL), sym_name, howto->name,
4940 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
4944 case bfd_reloc_undefined:
4945 if (!((*info->callbacks->undefined_symbol)
4946 (info, sym_name, input_bfd, input_section,
4947 rel->r_offset, TRUE)))
4951 case bfd_reloc_outofrange:
4952 msg = _("internal error: out of range error");
4955 case bfd_reloc_notsupported:
4956 msg = _("internal error: unsupported relocation error");
4959 case bfd_reloc_dangerous:
4960 msg = _("internal error: dangerous error");
4964 msg = _("internal error: unknown error");
4969 if (!((*info->callbacks->warning)
4970 (info, msg, sym_name, input_bfd, input_section,
  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      /* Squash out everything except the R_SPU_PPU* relocs we have
	 decided to emit, keeping those in their original order.  */
      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
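/* The loop above is the usual stable in-place filter: a read pointer
   visits every reloc while a write pointer trails behind it, storing
   only the entries to keep.  A minimal sketch of the idiom (the names
   `vec', `n' and `want' are hypothetical, not part of BFD):

     size_t keep = 0, j;
     for (j = 0; j < n; j++)
       if (want (&vec[j]))
	 vec[keep++] = vec[j];
     n = keep;

   Survivors retain their relative order and no scratch buffer is
   needed.  */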
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static int
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;

      for (g = h->got.glist; g != NULL; g = g->next)
	if (htab->params->ovly_flavour == ovly_soft_icache
	    ? g->br_addr == g->stub_addr
	    : g->addend == 0 && g->ovl == 0)
	  {
	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
			     (htab->stub_sec[0]->output_section->owner,
			      htab->stub_sec[0]->output_section));
	    sym->st_value = g->stub_addr;
	    break;
	  }
    }

  return 1;
}
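/* For example (symbol name hypothetical): a defined `_SPUEAR_foo' that
   acquired an overlay stub leaves the link with st_value set to the
   stub's address in the first stub section rather than foo's own
   address, so PPU-side code entering through _SPUEAR_ symbols always
   arrives via the stub.  */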
static int spu_plugin = 0;

/* Set or clear "plugin" mode.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}

/* Set ELF header e_type for plugins.  */

static void
spu_elf_post_process_headers (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
}
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  int extra = 0;
  asection *sec;

  if (info != NULL)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      extra = htab->num_overlays;
    }

  if (extra)
    ++extra;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
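/* Worked example (numbers assumed): with four overlays, extra starts
   at 4 and the increment above bumps it to 5; a loadable .toe section
   then brings the total to 6 additional program headers.  */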
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		/* Split the sections following S off into a new
		   PT_LOAD segment.  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		/* S was not first; keep the preceding sections in M
		   and give S a single-section segment of its own.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }

  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_tdata (abfd)->segment_map;
  p_overlay = &m_overlay;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
	  && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	{
	  struct elf_segment_map *m = *p;
	  *p = m->next;
	  *p_overlay = m;
	  p_overlay = &m->next;
	  continue;
	}

      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  *p_overlay = elf_tdata (abfd)->segment_map;
  elf_tdata (abfd)->segment_map = m_overlay;

  return TRUE;
}
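/* Illustrative effect of the list surgery above (segment names are
   hypothetical): a segment map of [text] [ovl1] [data] [ovl2] becomes
   [ovl1] [ovl2] [text] [data].  Overlay segments keep their original
   relative order, and the remaining segments follow unchanged.  */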
/* Tweak the section type of .note.spu_name.  */

static bfd_boolean
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
		       Elf_Internal_Shdr *hdr,
		       asection *sec)
{
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return TRUE;
}
/* Tweak phdrs before writing them out.  */

static bfd_boolean
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0
		&& htab->params->ovly_flavour != ovly_soft_icache)
	      {
		bfd_byte *p = htab->ovtab->contents;
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  Entries are four
		   words (vma, size, file_off, buf), so overlay O's
		   file_off word lives at byte O * 16 + 8.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
      /* Soft-icache has its file offset put in .ovl.init.  */
      if (htab->init != NULL && htab->init->size != 0)
	{
	  bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	  bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	}
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  if (i == (unsigned int) -1)
    /* The loop above found no overlap, so it is safe to round every
       PT_LOAD segment up.  */
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
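/* The `-x & 15' expression above is the distance from X up to the next
   multiple of 16: for p_filesz == 0x123, -0x123 & 15 == 0xd, and
   0x123 + 0xd == 0x130.  In two's complement it is equivalent to
   (16 - x % 16) % 16.  */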
#define TARGET_BIG_SYM		bfd_elf32_spu_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define elf_backend_object_p			spu_elf_object_p
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
#define elf_backend_post_process_headers	spu_elf_post_process_headers
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

#include "elf32-target.h"