1 /* SPU specific support for 32-bit ELF
3 Copyright (C) 2006-2015 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table[] = {
40 HOWTO (R_SPU_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
41 bfd_elf_generic_reloc, "SPU_NONE",
42 FALSE, 0, 0x00000000, FALSE),
43 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
44 bfd_elf_generic_reloc, "SPU_ADDR10",
45 FALSE, 0, 0x00ffc000, FALSE),
46 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
47 bfd_elf_generic_reloc, "SPU_ADDR16",
48 FALSE, 0, 0x007fff80, FALSE),
49 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
50 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
51 FALSE, 0, 0x007fff80, FALSE),
52 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
53 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
54 FALSE, 0, 0x007fff80, FALSE),
55 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
56 bfd_elf_generic_reloc, "SPU_ADDR18",
57 FALSE, 0, 0x01ffff80, FALSE),
58 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "SPU_ADDR32",
60 FALSE, 0, 0xffffffff, FALSE),
61 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "SPU_REL16",
63 FALSE, 0, 0x007fff80, TRUE),
64 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
65 bfd_elf_generic_reloc, "SPU_ADDR7",
66 FALSE, 0, 0x001fc000, FALSE),
67 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
68 spu_elf_rel9, "SPU_REL9",
69 FALSE, 0, 0x0180007f, TRUE),
70 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
71 spu_elf_rel9, "SPU_REL9I",
72 FALSE, 0, 0x0000c07f, TRUE),
73 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
74 bfd_elf_generic_reloc, "SPU_ADDR10I",
75 FALSE, 0, 0x00ffc000, FALSE),
76 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
77 bfd_elf_generic_reloc, "SPU_ADDR16I",
78 FALSE, 0, 0x007fff80, FALSE),
79 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
80 bfd_elf_generic_reloc, "SPU_REL32",
81 FALSE, 0, 0xffffffff, TRUE),
82 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "SPU_ADDR16X",
84 FALSE, 0, 0x007fff80, FALSE),
85 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
86 bfd_elf_generic_reloc, "SPU_PPU32",
87 FALSE, 0, 0xffffffff, FALSE),
88 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
89 bfd_elf_generic_reloc, "SPU_PPU64",
91 HOWTO (R_SPU_ADD_PIC, 0, 0, 0, FALSE, 0, complain_overflow_dont,
92 bfd_elf_generic_reloc, "SPU_ADD_PIC",
93 FALSE, 0, 0x00000000, FALSE),
96 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
97 { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
98 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
108 return (enum elf_spu_reloc_type) -1;
111 case BFD_RELOC_SPU_IMM10W:
113 case BFD_RELOC_SPU_IMM16W:
115 case BFD_RELOC_SPU_LO16:
116 return R_SPU_ADDR16_LO;
117 case BFD_RELOC_SPU_HI16:
118 return R_SPU_ADDR16_HI;
119 case BFD_RELOC_SPU_IMM18:
121 case BFD_RELOC_SPU_PCREL16:
123 case BFD_RELOC_SPU_IMM7:
125 case BFD_RELOC_SPU_IMM8:
127 case BFD_RELOC_SPU_PCREL9a:
129 case BFD_RELOC_SPU_PCREL9b:
131 case BFD_RELOC_SPU_IMM10:
132 return R_SPU_ADDR10I;
133 case BFD_RELOC_SPU_IMM16:
134 return R_SPU_ADDR16I;
137 case BFD_RELOC_32_PCREL:
139 case BFD_RELOC_SPU_PPU32:
141 case BFD_RELOC_SPU_PPU64:
143 case BFD_RELOC_SPU_ADD_PIC:
144 return R_SPU_ADD_PIC;
149 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
151 Elf_Internal_Rela *dst)
153 enum elf_spu_reloc_type r_type;
155 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
156 /* PR 17512: file: 90c2a92e. */
157 if (r_type >= R_SPU_max)
159 (*_bfd_error_handler) (_("%B: unrecognised SPU reloc number: %d"),
161 bfd_set_error (bfd_error_bad_value);
164 cache_ptr->howto = &elf_howto_table[(int) r_type];
167 static reloc_howto_type *
168 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
169 bfd_reloc_code_real_type code)
171 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
173 if (r_type == (enum elf_spu_reloc_type) -1)
176 return elf_howto_table + r_type;
179 static reloc_howto_type *
180 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
185 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
186 if (elf_howto_table[i].name != NULL
187 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
188 return &elf_howto_table[i];
193 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
195 static bfd_reloc_status_type
196 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
197 void *data, asection *input_section,
198 bfd *output_bfd, char **error_message)
200 bfd_size_type octets;
204 /* If this is a relocatable link (output_bfd test tells us), just
205 call the generic function. Any adjustment will be done at final
207 if (output_bfd != NULL)
208 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
209 input_section, output_bfd, error_message);
211 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
212 return bfd_reloc_outofrange;
213 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
215 /* Get symbol value. */
217 if (!bfd_is_com_section (symbol->section))
219 if (symbol->section->output_section)
220 val += symbol->section->output_section->vma;
222 val += reloc_entry->addend;
224 /* Make it pc-relative. */
225 val -= input_section->output_section->vma + input_section->output_offset;
228 if (val + 256 >= 512)
229 return bfd_reloc_overflow;
231 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
233 /* Move two high bits of value to REL9I and REL9 position.
234 The mask will take care of selecting the right field. */
235 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
236 insn &= ~reloc_entry->howto->dst_mask;
237 insn |= val & reloc_entry->howto->dst_mask;
238 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
243 spu_elf_new_section_hook (bfd *abfd, asection *sec)
245 if (!sec->used_by_bfd)
247 struct _spu_elf_section_data *sdata;
249 sdata = bfd_zalloc (abfd, sizeof (*sdata));
252 sec->used_by_bfd = sdata;
255 return _bfd_elf_new_section_hook (abfd, sec);
/* Set up overlay info for executables.  Scans PT_LOAD program headers
   flagged PF_OVERLAY and records an overlay index and buffer number in
   the spu section data of every section each such segment contains.
   NOTE(review): this copy of the function is missing several interior
   lines (braces, counter updates, return); comments below cover only
   the code that is visible.  */

spu_elf_object_p (bfd *abfd)
  /* Only executables/shared objects carry overlay segments.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      /* Walk all program headers, counting overlay segments.  */
      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	    /* A new overlay buffer starts when this segment's vaddr
	       differs from the previous overlay segment's in the low
	       18 bits (the SPU local-store offset).  */
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      /* Tag every non-empty section inside this segment.  */
	      for (j = 1; j < elf_numsections (abfd); j++)
		  Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		  if (ELF_SECTION_SIZE (shdr, phdr) != 0
		      && ELF_SECTION_IN_SEGMENT (shdr, phdr))
		      asection *sec = shdr->bfd_section;
		      spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		      spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
297 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
298 strip --strip-unneeded will not remove them. */
301 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
303 if (sym->name != NULL
304 && sym->section != bfd_abs_section_ptr
305 && strncmp (sym->name, "_EAR_", 5) == 0)
306 sym->flags |= BSF_KEEP;
/* SPU ELF linker hash table.  Extends the generic ELF hash table with
   the overlay/stub state used by this backend.
   NOTE(review): several fields of these structures are not visible in
   this copy; only the fields shown are documented.  */

struct spu_link_hash_table
  /* Generic ELF hash table; must be first so casts work.  */
  struct elf_link_hash_table elf;

  /* Linker-supplied parameters (see elf32-spu.h).  */
  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */

  /* Overlay manager entry points: load/return (normal flavour) or
     branch/call handlers (soft icache) -- see entry_names[].  */
  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section.  */

  /* Set on stub emission errors.  */
  unsigned int stub_err : 1;

/* Hijack the generic got fields for overlay stub accounting.  */

  struct got_entry *next;

/* Retrieve the SPU hash table from the link info, verifying the table
   id so a mismatched backend yields NULL instead of a bogus cast.  */
#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
  == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)

  /* Callee of this call edge.  */
  struct function_info *fun;
  /* Next edge in the caller's call_list.  */
  struct call_info *next;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;

  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  Elf_Internal_Sym *sym;
  struct elf_link_hash_entry *h;
  /* Function section.  */
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  /* Offset where we found a store of lr, or -1 if none found.  */
  /* Offset where we found the stack adjustment insn.  */
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section.  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;

struct spu_elf_stack_info
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];

static struct function_info *find_function (asection *, bfd_vma,
					    struct bfd_link_info *);
444 /* Create a spu ELF linker hash table. */
446 static struct bfd_link_hash_table *
447 spu_elf_link_hash_table_create (bfd *abfd)
449 struct spu_link_hash_table *htab;
451 htab = bfd_zmalloc (sizeof (*htab));
455 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
456 _bfd_elf_link_hash_newfunc,
457 sizeof (struct elf_link_hash_entry),
464 htab->elf.init_got_refcount.refcount = 0;
465 htab->elf.init_got_refcount.glist = NULL;
466 htab->elf.init_got_offset.offset = 0;
467 htab->elf.init_got_offset.glist = NULL;
468 return &htab->elf.root;
472 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
474 bfd_vma max_branch_log2;
476 struct spu_link_hash_table *htab = spu_hash_table (info);
477 htab->params = params;
478 htab->line_size_log2 = bfd_log2 (htab->params->line_size);
479 htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
481 /* For the software i-cache, we provide a "from" list whose size
482 is a power-of-two number of quadwords, big enough to hold one
483 byte per outgoing branch. Compute this number here. */
484 max_branch_log2 = bfd_log2 (htab->params->max_branch);
485 htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.
   NOTE(review): interior lines (the symsecp parameter, braces, some
   assignments and the return) are missing from this copy; comments
   cover only the visible code.  */

get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  /* Indices at or above sh_info refer to global symbols.  */
  if (r_symndx >= symtab_hdr->sh_info)
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;

      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

	  /* Use cached symtab contents when available, otherwise read
	     the local symbols from the file.  */
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    0, NULL, NULL, NULL);

      sym = locsyms + r_symndx;

	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.
   Also creates the .fixup section when fixup emission is requested.
   NOTE(review): interior lines (declarations, some error returns and
   the final return) are missing from this copy.  */

spu_elf_create_sections (struct bfd_link_info *info)
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Only create the note section if no input already provides one.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)

      /* Make SPU_PTNOTE_SPUNAME section.  */

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
	  || !bfd_set_section_alignment (ibfd, s, 4))

      /* Note layout: 12-byte header, word-aligned name, then the
	 output filename as the descriptor, also word-aligned.  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))

      data = bfd_zalloc (ibfd, size);

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);

  if (htab->params->emit_fixups)

      /* Hang the .fixup section off the dynobj.  */
      if (htab->elf.dynobj == NULL)
	htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
	       | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
626 /* qsort predicate to sort sections by vma. */
629 sort_sections (const void *a, const void *b)
631 const asection *const *s1 = a;
632 const asection *const *s2 = b;
633 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
636 return delta < 0 ? -1 : 1;
638 return (*s1)->index - (*s2)->index;
/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.
   NOTE(review): this copy is missing many interior lines (braces,
   loop bodies, returns); comments cover only the visible code.  */

spu_elf_find_overlays (struct bfd_link_info *info)
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  /* Overlay manager entry points, indexed [i][ovly_flavour].  */
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }

  if (info->output_bfd->section_count < 2)

    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
	  if (s->vma < ovl_end)
	      asection *s0 = alloc_sec[i - 1];
	      /* Cache area extends num_lines * line_size beyond the
		 section preceding the first overlap.  */
		          << (htab->num_lines_log2 + htab->line_size_log2)));
	      ovl_end = s->vma + s->size;

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	  if (s->vma >= ovl_end)

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      set_id = (num_buf == prev_buf)? set_id + 1 : 0;

	      /* Overlay sections must be cache-line aligned ...  */
	      if ((s->vma - vma_start) & (htab->params->line_size - 1))
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "does not start on a cache line.\n"),
		  bfd_set_error (bfd_error_bad_value);
	      /* ... and no larger than one cache line.  */
	      else if (s->size > htab->params->line_size)
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "is larger than a cache line.\n"),
		  bfd_set_error (bfd_error_bad_value);

	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= (set_id << htab->num_lines_log2) + num_buf;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;

      /* Ensure there are no more overlay sections.  */
	  if (s->vma < ovl_end)
	      info->callbacks->einfo (_("%X%P: overlay section %A "
					"is not in cache area.\n"),
	      bfd_set_error (bfd_error_bad_value);
	      ovl_end = s->vma + s->size;

      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	  if (s->vma < ovl_end)
	      asection *s0 = alloc_sec[i - 1];

	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		      if (strncmp (s0->name, ".ovl.init", 9) != 0)
			  alloc_sec[ovl_index] = s0;
			  spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
			  spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
			  ovl_end = s->vma + s->size;
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  /* Overlays sharing a buffer must be co-located.  */
		  if (s0->vma != s->vma)
		      info->callbacks->einfo (_("%X%P: overlay sections %A "
						"and %A do not start at the "
		      bfd_set_error (bfd_error_bad_value);
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
	    ovl_end = s->vma + s->size;

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  /* Resolve the two overlay manager entry points for this flavour.  */
  for (i = 0; i < 2; i++)
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);

      if (h->root.type == bfd_link_hash_new)
	  h->root.type = bfd_link_hash_undefined;
	  h->ref_regular_nonweak = 1;

      htab->ovly_entry[i] = h;
/* Non-zero to use bra in overlay stubs rather than br.  */

/* SPU instruction words used when emitting overlay stubs; register
   and immediate fields are OR'ed into these base opcodes.  */
#define BRA	0x30000000	/* absolute branch */
#define BRASL	0x31000000	/* absolute branch and set link */
#define BR	0x32000000	/* relative branch */
#define BRSL	0x33000000	/* relative branch and set link */
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000	/* immediate load address */
850 /* Return true for all relative and absolute branch instructions.
858 brhnz 00100011 0.. */
861 is_branch (const unsigned char *insn)
863 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
866 /* Return true for all indirect branch instructions.
874 bihnz 00100101 011 */
877 is_indirect_branch (const unsigned char *insn)
879 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
882 /* Return true for branch hint instructions.
887 is_hint (const unsigned char *insn)
889 return (insn[0] & 0xfc) == 0x10;
892 /* True if INPUT_SECTION might need overlay stubs. */
895 maybe_needs_stubs (asection *input_section)
897 /* No stubs for debug sections and suchlike. */
898 if ((input_section->flags & SEC_ALLOC) == 0)
901 /* No stubs for link-once sections that will be discarded. */
902 if (input_section->output_section == bfd_abs_section_ptr)
905 /* Don't create stubs for .eh_frame references. */
906 if (strcmp (input_section->name, ".eh_frame") == 0)
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.
   NOTE(review): interior lines (some parameters, braces, early
   returns and the tail of the function) are missing from this copy;
   comments cover only the visible code.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *input_section,
		Elf_Internal_Rela *irela,
		struct bfd_link_info *info)
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;

      /* Symbols without usable overlay data never need stubs.  */
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)

      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))

    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);

  /* Only 16-bit branch-capable relocs can be branch/hint insns.  */
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
      if (contents == NULL)
	  if (!bfd_get_section_contents (input_section->owner,

	  contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
	  /* brsl/brasl set the link register: a call.  */
	  call = (contents[0] & 0xfd) == 0x31;
	      && sym_type != STT_FUNC
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

		sym_name = h->root.root.string;

		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,

	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);

  /* Data references and non-branch code references to non-code
     sections never need stubs.  */
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
      unsigned int lrlive = 0;
	/* lrlive encoding from the branch hint bits.  */
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
	ret = br000_ovl_stub + lrlive;

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
/* Count the overlay stub needed for the reloc IRELA in ISEC, recording
   it on the symbol's got-entry list (global H or the local got array).
   NOTE(review): interior lines (parameters, braces, breaks/returns)
   are missing from this copy; comments cover only the visible code.  */
count_stub (struct spu_link_hash_table *htab,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
  unsigned int ovl = 0;
  struct got_entry *g, **head;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

    head = &h->got.glist;

      /* Local symbol: lazily create the per-bfd got-entry array.  */
      if (elf_local_got_ents (ibfd) == NULL)
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)

      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  /* Soft icache stubs are counted per-branch, no dedup needed.  */
  if (htab->params->ovly_flavour == ovly_soft_icache)
      htab->stub_count[ovl] += 1;

  addend = irela->r_addend;

      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)

	  /* Need a new non-overlay area stub.  Zap other stubs.  */
	  for (g = *head; g != NULL; g = gnext)
	      if (g->addend == addend)
		  htab->stub_count[g->ovl] -= 1;

      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))

      g = bfd_malloc (sizeof *g);
      g->stub_addr = (bfd_vma) -1;

      htab->stub_count[ovl] += 1;
1153 /* Support two sizes of overlay stubs, a slower more compact stub of two
1154 instructions, and a faster stub of four instructions.
1155 Soft-icache stubs are four or eight words. */
1158 ovl_stub_size (struct spu_elf_params *params)
1160 return 16 << params->ovly_flavour >> params->compact_stub;
1164 ovl_stub_size_log2 (struct spu_elf_params *params)
1166 return 4 + params->ovly_flavour - params->compact_stub;
/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load

   Software icache stubs are:

   .word target_index
   .word target_ia;
   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler

   NOTE(review): this copy of build_stub is missing many interior
   lines (parameters, braces, assignments, returns); comments cover
   only the visible code.  */

build_stub (struct bfd_link_info *info,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

    head = &h->got.glist;
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
      /* Soft icache: one fresh got entry per branch.  */
      g = bfd_malloc (sizeof *g);
      g->br_addr = (irela->r_offset
		    + isec->output_offset
		    + isec->output_section->vma);
      /* Find the entry count_stub recorded for this reloc.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
      /* A non-overlay stub covers references from overlays too.  */
      if (g->ovl == 0 && ovl != 0)
      /* Already built.  */
      if (g->stub_addr != (bfd_vma) -1)

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  /* All addresses involved must be word aligned.  */
  if (((dest | to | from) & 3) != 0)

  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
      /* Four-instruction stub: ila/lnop/ila/br(a).  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
      /* Two-instruction stub: br(a)sl + target word.  */
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
      /* Determine the lr liveness code to embed in the stub.  */
      if (stub_type == nonovl_stub)
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
      else if (irela != NULL)
	  /* Analyse branch instructions.  */
	  struct function_info *caller;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
	      while (caller->start != NULL)
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)

	  if (off > caller->sp_adjust)
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
	  else if (off > caller->lr_store)
	      /* Between lr save and stack adjust.  */
	      /* This should never happen since prologues won't
		 be split here.  */
	    /* On entry to function.  */

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      to = (htab->ovly_entry[1]->root.u.def.value
	    + htab->ovly_entry[1]->root.u.def.section->output_offset
	    + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      br_dest = g->stub_addr;
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

      /* Extra space for linked list entries.  */

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
      /* Emit a "NNNNNNNN.ovl_call.<target>" symbol naming the stub.  */
      len = 8 + sizeof (".ovl_call.") - 1;
	len += strlen (h->root.root.string);
	add = (int) irela->r_addend & 0xffffffff;
      name = bfd_malloc (len + 1);

      sprintf (name, "%08x.ovl_call.", g->ovl);
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      if (h->root.type == bfd_link_hash_new)
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
/* NOTE(review): this chunk is a sampled extraction; each line carries a
   stray original-line-number prefix and some intervening source lines are
   missing, so only comments are added here.
   elf_link_hash_traverse callback: for each defined (or weakly defined)
   symbol whose name starts with "_SPUEAR_" and whose defining section can
   need stubs (in an overlay, or when non-overlay stubs are requested),
   size a non-overlay call stub via count_stub.  */
1470 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1474 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1476 /* Symbols starting with _SPUEAR_ need a stub because they may be
1477 invoked by the PPU. */
1478 struct bfd_link_info *info = inf;
1479 struct spu_link_hash_table *htab = spu_hash_table (info);
1482 if ((h->root.type == bfd_link_hash_defined
1483 || h->root.type == bfd_link_hash_defweak)
1485 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1486 && (sym_sec = h->root.u.def.section) != NULL
1487 && sym_sec->output_section != bfd_abs_section_ptr
1488 && spu_elf_section_data (sym_sec->output_section) != NULL
1489 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1490 || htab->params->non_overlay_stubs))
1492 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
/* elf_link_hash_traverse callback, the building counterpart of
   allocate_spuear_stubs: emit the actual stub code for each _SPUEAR_
   symbol that qualifies, via build_stub.  The guard condition mirrors
   the sizing pass exactly, so the two passes agree on which symbols
   get stubs.  */
1499 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1501 /* Symbols starting with _SPUEAR_ need a stub because they may be
1502 invoked by the PPU. */
1503 struct bfd_link_info *info = inf;
1504 struct spu_link_hash_table *htab = spu_hash_table (info);
1507 if ((h->root.type == bfd_link_hash_defined
1508 || h->root.type == bfd_link_hash_defweak)
1510 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1511 && (sym_sec = h->root.u.def.section) != NULL
1512 && sym_sec->output_section != bfd_abs_section_ptr
1513 && spu_elf_section_data (sym_sec->output_section) != NULL
1514 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1515 || htab->params->non_overlay_stubs))
1517 return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1518 h->root.u.def.value, sym_sec);
/* Walk every SPU input bfd and every reloc of every section that might
   need stubs; for each reloc that needs_ovl_stub says requires one,
   either count it (BUILD == FALSE, via count_stub) or emit it
   (BUILD == TRUE, via build_stub).  On the first counted stub the
   per-overlay stub_count array is lazily allocated.  The
   error_ret_free_internal / error_ret_free_local labels form the usual
   goto-cleanup chain freeing relocs and local symbols not cached by the
   linker.  NOTE(review): several original lines are elided from this
   extraction, so the brace structure is not fully visible here.  */
1524 /* Size or build stubs. */
1527 process_stubs (struct bfd_link_info *info, bfd_boolean build)
1529 struct spu_link_hash_table *htab = spu_hash_table (info);
1532 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
1534 extern const bfd_target spu_elf32_vec;
1535 Elf_Internal_Shdr *symtab_hdr;
1537 Elf_Internal_Sym *local_syms = NULL;
1539 if (ibfd->xvec != &spu_elf32_vec)
1542 /* We'll need the symbol table in a second. */
1543 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1544 if (symtab_hdr->sh_info == 0)
1547 /* Walk over each section attached to the input bfd. */
1548 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
1550 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1552 /* If there aren't any relocs, then there's nothing more to do. */
1553 if ((isec->flags & SEC_RELOC) == 0
1554 || isec->reloc_count == 0)
1557 if (!maybe_needs_stubs (isec))
1560 /* Get the relocs. */
1561 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1563 if (internal_relocs == NULL)
1564 goto error_ret_free_local;
1566 /* Now examine each relocation. */
1567 irela = internal_relocs;
1568 irelaend = irela + isec->reloc_count;
1569 for (; irela < irelaend; irela++)
1571 enum elf_spu_reloc_type r_type;
1572 unsigned int r_indx;
1574 Elf_Internal_Sym *sym;
1575 struct elf_link_hash_entry *h;
1576 enum _stub_type stub_type;
1578 r_type = ELF32_R_TYPE (irela->r_info);
1579 r_indx = ELF32_R_SYM (irela->r_info);
1581 if (r_type >= R_SPU_max)
1583 bfd_set_error (bfd_error_bad_value);
1584 error_ret_free_internal:
1585 if (elf_section_data (isec)->relocs != internal_relocs)
1586 free (internal_relocs);
1587 error_ret_free_local:
1588 if (local_syms != NULL
1589 && (symtab_hdr->contents
1590 != (unsigned char *) local_syms))
1595 /* Determine the reloc target section. */
1596 if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
1597 goto error_ret_free_internal;
1599 stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1601 if (stub_type == no_stub)
1603 else if (stub_type == stub_error)
1604 goto error_ret_free_internal;
1606 if (htab->stub_count == NULL)
1609 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1610 htab->stub_count = bfd_zmalloc (amt);
1611 if (htab->stub_count == NULL)
1612 goto error_ret_free_internal;
1617 if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1618 goto error_ret_free_internal;
1625 dest = h->root.u.def.value;
1627 dest = sym->st_value;
1628 dest += irela->r_addend;
1629 if (!build_stub (info, ibfd, isec, stub_type, h, irela,
1631 goto error_ret_free_internal;
1635 /* We're done with the internal relocs, free them. */
1636 if (elf_section_data (isec)->relocs != internal_relocs)
1637 free (internal_relocs);
1640 if (local_syms != NULL
1641 && symtab_hdr->contents != (unsigned char *) local_syms)
1643 if (!info->keep_memory)
1646 symtab_hdr->contents = (unsigned char *) local_syms;
/* Run the stub-counting pass, then create and size the linker-generated
   sections: one ".stub" section per overlay (index 0 is the non-overlay
   one), and either the soft-icache tables (".ovtab" sized
   (16+16+(16<<fromelem_size_log2)) << num_lines_log2, plus a 16-byte
   ".ovini" section) or the classic ovly_table layout (".ovtab" of
   num_overlays*16 + 16 + num_buf*4 bytes plus a 16-byte ".toe").
   Section alignment for stub sections is the log2 of the stub size.  */
1653 /* Allocate space for overlay call and return stubs.
1654 Return 0 on error, 1 if no overlays, 2 otherwise. */
1657 spu_elf_size_stubs (struct bfd_link_info *info)
1659 struct spu_link_hash_table *htab;
1666 if (!process_stubs (info, FALSE))
1669 htab = spu_hash_table (info);
1670 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
1674 ibfd = info->input_bfds;
1675 if (htab->stub_count != NULL)
1677 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1678 htab->stub_sec = bfd_zmalloc (amt);
1679 if (htab->stub_sec == NULL)
1682 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1683 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1684 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1685 htab->stub_sec[0] = stub;
1687 || !bfd_set_section_alignment (ibfd, stub,
1688 ovl_stub_size_log2 (htab->params)))
1690 stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
1691 if (htab->params->ovly_flavour == ovly_soft_icache)
1692 /* Extra space for linked list entries. */
1693 stub->size += htab->stub_count[0] * 16;
1695 for (i = 0; i < htab->num_overlays; ++i)
1697 asection *osec = htab->ovl_sec[i];
1698 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1699 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1700 htab->stub_sec[ovl] = stub;
1702 || !bfd_set_section_alignment (ibfd, stub,
1703 ovl_stub_size_log2 (htab->params)))
1705 stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
1709 if (htab->params->ovly_flavour == ovly_soft_icache)
1711 /* Space for icache manager tables.
1712 a) Tag array, one quadword per cache line.
1713 b) Rewrite "to" list, one quadword per cache line.
1714 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1715 a power-of-two number of full quadwords) per cache line. */
1718 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1719 if (htab->ovtab == NULL
1720 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1723 htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
1724 << htab->num_lines_log2;
1726 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1727 htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
1728 if (htab->init == NULL
1729 || !bfd_set_section_alignment (ibfd, htab->init, 4))
1732 htab->init->size = 16;
1734 else if (htab->stub_count == NULL)
1738 /* htab->ovtab consists of two arrays.
1748 . } _ovly_buf_table[];
1751 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1752 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1753 if (htab->ovtab == NULL
1754 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1757 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1760 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1761 if (htab->toe == NULL
1762 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1764 htab->toe->size = 16;
/* Hand each linker-created section to the ld callback
   params->place_spu_section, which decides its output placement:
   non-overlay stubs after ".text", each overlay's stubs with its overlay
   section, ".ovini" after ".ovl.init", and the overlay table in ".data"
   (ovout is presumably switched for soft-icache on an elided line —
   TODO confirm against full source).  */
1769 /* Called from ld to place overlay manager data sections. This is done
1770 after the overlay manager itself is loaded, mainly so that the
1771 linker's htab->init section is placed after any other .ovl.init
1775 spu_elf_place_overlay_data (struct bfd_link_info *info)
1777 struct spu_link_hash_table *htab = spu_hash_table (info);
1780 if (htab->stub_sec != NULL)
1782 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1784 for (i = 0; i < htab->num_overlays; ++i)
1786 asection *osec = htab->ovl_sec[i];
1787 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1788 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1792 if (htab->params->ovly_flavour == ovly_soft_icache)
1793 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1795 if (htab->ovtab != NULL)
1797 const char *ovout = ".data";
1798 if (htab->params->ovly_flavour == ovly_soft_icache)
1800 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1803 if (htab->toe != NULL)
1804 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1807 /* Functions to handle embedded spu_ovl.o object. */
/* iovec "open" callback for the built-in overlay manager object; only
   the signature is visible in this extraction — presumably it returns
   the stream cookie unchanged (TODO confirm against full source).  */
1810 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
/* iovec "pread" callback: serve reads of the embedded overlay-manager
   image from the in-memory [os->start, os->end) range.  Requests past
   the end are rejected; requests overlapping the end are truncated to
   the bytes available before the memcpy.  */
1816 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1822 struct _ovl_stream *os;
1826 os = (struct _ovl_stream *) stream;
1827 max = (const char *) os->end - (const char *) os->start;
1829 if ((ufile_ptr) offset >= max)
1833 if (count > max - offset)
1834 count = max - offset;
1836 memcpy (buf, (const char *) os->start + offset, count);
/* iovec "stat" callback: report only the size of the in-memory image
   (end - start); all other stat fields are zeroed.  */
1841 ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
1845 struct _ovl_stream *os = (struct _ovl_stream *) stream;
1847 memset (sb, 0, sizeof (*sb));
1848 sb->st_size = (const char *) os->end - (const char *) os->start;
/* Open the embedded overlay-manager object as a bfd using the iovec
   callbacks above (bfd_openr_iovec); returns nonzero on success.  */
1853 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1855 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1862 return *ovl_bfd != NULL;
/* Return the overlay index recorded on SEC's output section
   (0 presumably meaning "not in an overlay"); the visible guard skips
   sections whose output is the absolute section.  Some guard lines are
   elided from this extraction.  */
1866 overlay_index (asection *sec)
1869 || sec->output_section == bfd_abs_section_ptr)
1871 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1874 /* Define an STT_OBJECT symbol. */
/* Look up (creating if absent) NAME in the link hash table and define it
   as an STT_OBJECT symbol in htab->ovtab.  Errors with
   bfd_error_bad_value if the symbol was already defined by an input
   object (owner != NULL) or by a linker script — these overlay-table
   symbols must be linker-created only.  */
1876 static struct elf_link_hash_entry *
1877 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1879 struct elf_link_hash_entry *h;
1881 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1885 if (h->root.type != bfd_link_hash_defined
1888 h->root.type = bfd_link_hash_defined;
1889 h->root.u.def.section = htab->ovtab;
1890 h->type = STT_OBJECT;
1893 h->ref_regular_nonweak = 1;
1896 else if (h->root.u.def.section->owner != NULL)
1898 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1899 h->root.u.def.section->owner,
1900 h->root.root.string);
1901 bfd_set_error (bfd_error_bad_value);
1906 (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
1907 h->root.root.string);
1908 bfd_set_error (bfd_error_bad_value);
/* Final pass: allocate stub section contents, run process_stubs with
   BUILD == TRUE plus the _SPUEAR_ traversal, and verify each stub
   section's rebuilt size matches the sizing pass (rawsize trick: the
   sized value is stashed in rawsize, size is reset to 0 and re-grown
   while emitting).  Then fill ".ovtab": for soft-icache, define the
   __icache_* geometry symbols (values derived from num_lines_log2,
   line_size_log2, fromelem_size_log2); otherwise write the classic
   _ovly_table / _ovly_buf_table entries (vma, 16-aligned size, buffer
   number per overlay section — file_off filled in later) and define the
   _ovly_table* / _EAR_ symbols.  Also rejects overlay-manager entry
   symbols that themselves landed in an overlay section.  */
1915 /* Fill in all stubs and the overlay tables. */
1918 spu_elf_build_stubs (struct bfd_link_info *info)
1920 struct spu_link_hash_table *htab = spu_hash_table (info);
1921 struct elf_link_hash_entry *h;
1927 if (htab->num_overlays != 0)
1929 for (i = 0; i < 2; i++)
1931 h = htab->ovly_entry[i];
1933 && (h->root.type == bfd_link_hash_defined
1934 || h->root.type == bfd_link_hash_defweak)
1937 s = h->root.u.def.section->output_section;
1938 if (spu_elf_section_data (s)->u.o.ovl_index)
1940 (*_bfd_error_handler) (_("%s in overlay section"),
1941 h->root.root.string);
1942 bfd_set_error (bfd_error_bad_value);
1949 if (htab->stub_sec != NULL)
1951 for (i = 0; i <= htab->num_overlays; i++)
1952 if (htab->stub_sec[i]->size != 0)
1954 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1955 htab->stub_sec[i]->size);
1956 if (htab->stub_sec[i]->contents == NULL)
1958 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1959 htab->stub_sec[i]->size = 0;
1962 /* Fill in all the stubs. */
1963 process_stubs (info, TRUE);
1964 if (!htab->stub_err)
1965 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1969 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1970 bfd_set_error (bfd_error_bad_value);
1974 for (i = 0; i <= htab->num_overlays; i++)
1976 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1978 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1979 bfd_set_error (bfd_error_bad_value);
1982 htab->stub_sec[i]->rawsize = 0;
1986 if (htab->ovtab == NULL || htab->ovtab->size == 0)
1989 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1990 if (htab->ovtab->contents == NULL)
1993 p = htab->ovtab->contents;
1994 if (htab->params->ovly_flavour == ovly_soft_icache)
1998 h = define_ovtab_symbol (htab, "__icache_tag_array");
2001 h->root.u.def.value = 0;
2002 h->size = 16 << htab->num_lines_log2;
2005 h = define_ovtab_symbol (htab, "__icache_tag_array_size");
2008 h->root.u.def.value = 16 << htab->num_lines_log2;
2009 h->root.u.def.section = bfd_abs_section_ptr;
2011 h = define_ovtab_symbol (htab, "__icache_rewrite_to");
2014 h->root.u.def.value = off;
2015 h->size = 16 << htab->num_lines_log2;
2018 h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
2021 h->root.u.def.value = 16 << htab->num_lines_log2;
2022 h->root.u.def.section = bfd_abs_section_ptr;
2024 h = define_ovtab_symbol (htab, "__icache_rewrite_from");
2027 h->root.u.def.value = off;
2028 h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
2031 h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
2034 h->root.u.def.value = 16 << (htab->fromelem_size_log2
2035 + htab->num_lines_log2);
2036 h->root.u.def.section = bfd_abs_section_ptr;
2038 h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2041 h->root.u.def.value = htab->fromelem_size_log2;
2042 h->root.u.def.section = bfd_abs_section_ptr;
2044 h = define_ovtab_symbol (htab, "__icache_base");
2047 h->root.u.def.value = htab->ovl_sec[0]->vma;
2048 h->root.u.def.section = bfd_abs_section_ptr;
2049 h->size = htab->num_buf << htab->line_size_log2;
2051 h = define_ovtab_symbol (htab, "__icache_linesize");
2054 h->root.u.def.value = 1 << htab->line_size_log2;
2055 h->root.u.def.section = bfd_abs_section_ptr;
2057 h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2060 h->root.u.def.value = htab->line_size_log2;
2061 h->root.u.def.section = bfd_abs_section_ptr;
2063 h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2066 h->root.u.def.value = -htab->line_size_log2;
2067 h->root.u.def.section = bfd_abs_section_ptr;
2069 h = define_ovtab_symbol (htab, "__icache_cachesize");
2072 h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2073 h->root.u.def.section = bfd_abs_section_ptr;
2075 h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2078 h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2079 h->root.u.def.section = bfd_abs_section_ptr;
2081 h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2084 h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2085 h->root.u.def.section = bfd_abs_section_ptr;
2087 if (htab->init != NULL && htab->init->size != 0)
2089 htab->init->contents = bfd_zalloc (htab->init->owner,
2091 if (htab->init->contents == NULL)
2094 h = define_ovtab_symbol (htab, "__icache_fileoff");
2097 h->root.u.def.value = 0;
2098 h->root.u.def.section = htab->init;
2104 /* Write out _ovly_table. */
2105 /* set low bit of .size to mark non-overlay area as present. */
2107 obfd = htab->ovtab->output_section->owner;
2108 for (s = obfd->sections; s != NULL; s = s->next)
2110 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2114 unsigned long off = ovl_index * 16;
2115 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2117 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2118 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2120 /* file_off written later in spu_elf_modify_program_headers. */
2121 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2125 h = define_ovtab_symbol (htab, "_ovly_table");
2128 h->root.u.def.value = 16;
2129 h->size = htab->num_overlays * 16;
2131 h = define_ovtab_symbol (htab, "_ovly_table_end");
2134 h->root.u.def.value = htab->num_overlays * 16 + 16;
2137 h = define_ovtab_symbol (htab, "_ovly_buf_table");
2140 h->root.u.def.value = htab->num_overlays * 16 + 16;
2141 h->size = htab->num_buf * 4;
2143 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2146 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2150 h = define_ovtab_symbol (htab, "_EAR_");
2153 h->root.u.def.section = htab->toe;
2154 h->root.u.def.value = 0;
/* Scan the output PT_LOAD segment maps and return the first non-empty
   section whose [vma, vma+size-1] range falls outside the configured
   local-store window [local_store_lo, local_store_hi]; also records the
   window size in htab->local_store.  Presumably returns NULL when all
   sections fit (the final return is on an elided line — TODO confirm).  */
2160 /* Check that all loadable section VMAs lie in the range
2161 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2164 spu_elf_check_vma (struct bfd_link_info *info)
2166 struct elf_segment_map *m;
2168 struct spu_link_hash_table *htab = spu_hash_table (info);
2169 bfd *abfd = info->output_bfd;
2170 bfd_vma hi = htab->params->local_store_hi;
2171 bfd_vma lo = htab->params->local_store_lo;
2173 htab->local_store = hi + 1 - lo;
2175 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2176 if (m->p_type == PT_LOAD)
2177 for (i = 0; i < m->count; i++)
2178 if (m->sections[i]->size != 0
2179 && (m->sections[i]->vma < lo
2180 || m->sections[i]->vma > hi
2181 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2182 return m->sections[i];
/* Lightweight SPU prologue interpreter: walk 4-byte insns from OFFSET,
   simulating a register file just well enough (immediate loads il/ilhu/
   iohl/ila/fsmbi, ai/a/sf arithmetic, ori moves, andbi masking) to
   discover writes to r1 (the stack pointer) and the stqd that saves lr.
   Each opcode test below matches the primary opcode byte(s); the
   (imm ^ 0x200) - 0x200 and (imm ^ 0x8000) - 0x8000 expressions
   sign-extend 10- and 16-bit immediates.  Scanning stops at any branch,
   except brsl .+4 (pic register load) which merely clobbers rt.  */
2187 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2188 Search for stack adjusting insns, and return the sp delta.
2189 If a store of lr is found save the instruction offset to *LR_STORE.
2190 If a stack adjusting instruction is found, save that offset to
2194 find_function_stack_adjust (asection *sec,
2201 memset (reg, 0, sizeof (reg));
2202 for ( ; offset + 4 <= sec->size; offset += 4)
2204 unsigned char buf[4];
2208 /* Assume no relocs on stack adjusing insns. */
2209 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2213 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2215 if (buf[0] == 0x24 /* stqd */)
2217 if (rt == 0 /* lr */ && ra == 1 /* sp */)
2222 /* Partly decoded immediate field. */
2223 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2225 if (buf[0] == 0x1c /* ai */)
2228 imm = (imm ^ 0x200) - 0x200;
2229 reg[rt] = reg[ra] + imm;
2231 if (rt == 1 /* sp */)
2235 *sp_adjust = offset;
2239 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2241 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2243 reg[rt] = reg[ra] + reg[rb];
2248 *sp_adjust = offset;
2252 else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2254 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2256 reg[rt] = reg[rb] - reg[ra];
2261 *sp_adjust = offset;
2265 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2267 if (buf[0] >= 0x42 /* ila */)
2268 imm |= (buf[0] & 1) << 17;
2273 if (buf[0] == 0x40 /* il */)
2275 if ((buf[1] & 0x80) == 0)
2277 imm = (imm ^ 0x8000) - 0x8000;
2279 else if ((buf[1] & 0x80) == 0 /* ilhu */)
2285 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2287 reg[rt] |= imm & 0xffff;
2290 else if (buf[0] == 0x04 /* ori */)
2293 imm = (imm ^ 0x200) - 0x200;
2294 reg[rt] = reg[ra] | imm;
2297 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
2299 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
2300 | ((imm & 0x4000) ? 0x00ff0000 : 0)
2301 | ((imm & 0x2000) ? 0x0000ff00 : 0)
2302 | ((imm & 0x1000) ? 0x000000ff : 0));
2305 else if (buf[0] == 0x16 /* andbi */)
2311 reg[rt] = reg[ra] & imm;
2314 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2316 /* Used in pic reg load. Say rt is trashed. Won't be used
2317 in stack adjust, but we need to continue past this branch. */
2321 else if (is_branch (buf) || is_indirect_branch (buf))
2322 /* If we hit a branch then we must be out of the prologue. */
2329 /* qsort predicate to sort symbols by section and value. */
/* qsort cannot pass context, so the symbol array base and the parallel
   per-symbol section array are communicated via these file statics.  */
2331 static Elf_Internal_Sym *sort_syms_syms;
2332 static asection **sort_syms_psecs;
/* Ordering: section index first, then symbol value ascending, then
   (visible below) larger st_size first so an enclosing function sorts
   before symbols inside it, finally pointer order as a stable
   tie-break.  */
2335 sort_syms (const void *a, const void *b)
2337 Elf_Internal_Sym *const *s1 = a;
2338 Elf_Internal_Sym *const *s2 = b;
2339 asection *sec1,*sec2;
2340 bfd_signed_vma delta;
2342 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2343 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2346 return sec1->index - sec2->index;
2348 delta = (*s1)->st_value - (*s2)->st_value;
2350 return delta < 0 ? -1 : 1;
2352 delta = (*s2)->st_size - (*s1)->st_size;
2354 return delta < 0 ? -1 : 1;
2356 return *s1 < *s2 ? -1 : 1;
2359 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2360 entries for section SEC. */
/* Uses the classic pre-C99 "struct hack": spu_elf_stack_info carries one
   function_info inline, so only max_fun - 1 extra entries are added.
   Result (possibly NULL on allocation failure) is cached in the
   section's spu_elf_section_data and also returned.  */
2362 static struct spu_elf_stack_info *
2363 alloc_stack_info (asection *sec, int max_fun)
2365 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2368 amt = sizeof (struct spu_elf_stack_info);
2369 amt += (max_fun - 1) * sizeof (struct function_info);
2370 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2371 if (sec_data->u.i.stack_info != NULL)
2372 sec_data->u.i.stack_info->max_fun = max_fun;
2373 return sec_data->u.i.stack_info;
/* Insert a function_info for the symbol SYM_H (an Elf_Internal_Sym * for
   locals, an elf_link_hash_entry * for globals — the union u.sym/u.h
   mirrors this) into SEC's sorted stack_info array, lazily creating the
   array (20 entries) and growing it by ~50%+20 when full.  An entry at
   the same address is treated as an alias and updated in place rather
   than duplicated; zero-size symbols inside an existing function are
   ignored.  New entries get lr_store/sp_adjust/stack initialized from
   the prologue scan (find_function_stack_adjust).  Returns the entry,
   or presumably NULL on allocation failure (elided lines).  */
2376 /* Add a new struct function_info describing a (part of a) function
2377 starting at SYM_H. Keep the array sorted by address. */
2379 static struct function_info *
2380 maybe_insert_function (asection *sec,
2383 bfd_boolean is_func)
2385 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2386 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2392 sinfo = alloc_stack_info (sec, 20);
2399 Elf_Internal_Sym *sym = sym_h;
2400 off = sym->st_value;
2401 size = sym->st_size;
2405 struct elf_link_hash_entry *h = sym_h;
2406 off = h->root.u.def.value;
2410 for (i = sinfo->num_fun; --i >= 0; )
2411 if (sinfo->fun[i].lo <= off)
2416 /* Don't add another entry for an alias, but do update some
2418 if (sinfo->fun[i].lo == off)
2420 /* Prefer globals over local syms. */
2421 if (global && !sinfo->fun[i].global)
2423 sinfo->fun[i].global = TRUE;
2424 sinfo->fun[i].u.h = sym_h;
2427 sinfo->fun[i].is_func = TRUE;
2428 return &sinfo->fun[i];
2430 /* Ignore a zero-size symbol inside an existing function. */
2431 else if (sinfo->fun[i].hi > off && size == 0)
2432 return &sinfo->fun[i];
2435 if (sinfo->num_fun >= sinfo->max_fun)
2437 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
2438 bfd_size_type old = amt;
2440 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
2441 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
2442 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
2443 sinfo = bfd_realloc (sinfo, amt);
2446 memset ((char *) sinfo + old, 0, amt - old);
2447 sec_data->u.i.stack_info = sinfo;
2450 if (++i < sinfo->num_fun)
2451 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
2452 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
2453 sinfo->fun[i].is_func = is_func;
2454 sinfo->fun[i].global = global;
2455 sinfo->fun[i].sec = sec;
2457 sinfo->fun[i].u.h = sym_h;
2459 sinfo->fun[i].u.sym = sym_h;
2460 sinfo->fun[i].lo = off;
2461 sinfo->fun[i].hi = off + size;
2462 sinfo->fun[i].lr_store = -1;
2463 sinfo->fun[i].sp_adjust = -1;
2464 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
2465 &sinfo->fun[i].lr_store,
2466 &sinfo->fun[i].sp_adjust);
2467 sinfo->num_fun += 1;
2468 return &sinfo->fun[i];
/* Resolve FUN to its root piece (follow ->start links), then return:
   the hash-table symbol name for globals; a synthesized, heap-allocated
   "section+offset" string for nameless local symbols; otherwise the
   symbol's name from the symtab via bfd_elf_sym_name.  */
2471 /* Return the name of FUN. */
2474 func_name (struct function_info *fun)
2478 Elf_Internal_Shdr *symtab_hdr;
2480 while (fun->start != NULL)
2484 return fun->u.h->root.root.string;
2487 if (fun->u.sym->st_name == 0)
2489 size_t len = strlen (sec->name);
2490 char *name = bfd_malloc (len + 10);
2493 sprintf (name, "%s+%lx", sec->name,
2494 (unsigned long) fun->u.sym->st_value & 0xffffffff);
2498 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2499 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
/* The (insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20 test matches
   both nop (0x40 0x20 ...) and lnop (0x00 0x20 ...) encodings in one
   comparison; the all-zero word is "stop 0".  Reads past the section
   end or failed content reads count as "not a nop".  */
2502 /* Read the instruction at OFF in SEC. Return true iff the instruction
2503 is a nop, lnop, or stop 0 (all zero insn). */
2506 is_nop (asection *sec, bfd_vma off)
2508 unsigned char insn[4];
2510 if (off + 4 > sec->size
2511 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2513 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2515 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
/* Starting at FUN's current end rounded up to a 4-byte insn boundary,
   skip consecutive nops up to LIMIT (presumably updating fun->hi on
   elided lines — TODO confirm against full source).  */
2520 /* Extend the range of FUN to cover nop padding up to LIMIT.
2521 Return TRUE iff some instruction other than a NOP was found. */
2524 insns_at_end (struct function_info *fun, bfd_vma limit)
2526 bfd_vma off = (fun->hi + 3) & -4;
2528 while (off < limit && is_nop (fun->sec, off))
/* Sanity-pass over SEC's sorted function_info array: clip ranges that
   overlap the next entry (with a warning), clip the last entry to the
   section size, and report gaps — before the first entry, between
   entries whose padding contains real instructions (insns_at_end), or
   when there are no entries at all.  */
2539 /* Check and fix overlapping function ranges. Return TRUE iff there
2540 are gaps in the current info we have about functions in SEC. */
2543 check_function_ranges (asection *sec, struct bfd_link_info *info)
2545 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2546 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2548 bfd_boolean gaps = FALSE;
2553 for (i = 1; i < sinfo->num_fun; i++)
2554 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2556 /* Fix overlapping symbols. */
2557 const char *f1 = func_name (&sinfo->fun[i - 1]);
2558 const char *f2 = func_name (&sinfo->fun[i]);
2560 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2561 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2563 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2566 if (sinfo->num_fun == 0)
2570 if (sinfo->fun[0].lo != 0)
2572 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2574 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2576 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2577 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2579 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
/* Binary search the section's sorted function_info array for the entry
   whose [lo, hi) range contains OFFSET.  On a miss, report via the
   einfo callback, set bfd_error_bad_value, and presumably return NULL
   (elided line).  */
2585 /* Search current function info for a function that contains address
2586 OFFSET in section SEC. */
2588 static struct function_info *
2589 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2591 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2592 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2596 hi = sinfo->num_fun;
2599 mid = (lo + hi) / 2;
2600 if (offset < sinfo->fun[mid].lo)
2602 else if (offset >= sinfo->fun[mid].hi)
2605 return &sinfo->fun[mid];
2607 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2609 bfd_set_error (bfd_error_bad_value);
/* Linear-scan CALLER's call list for an existing entry to the same
   callee.  On a hit, merge: a normal call dominates a tail call
   (is_tail &=), counts accumulate, and the entry is moved to the list
   head (most-recently-seen first).  Otherwise push CALLEE onto the
   head.  Per the header comment, when FALSE is returned the caller
   still owns (and should free) CALLEE.  */
2613 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2614 if CALLEE was new. If this function return FALSE, CALLEE should
2618 insert_callee (struct function_info *caller, struct call_info *callee)
2620 struct call_info **pp, *p;
2622 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2623 if (p->fun == callee->fun)
2625 /* Tail calls use less stack than normal calls. Retain entry
2626 for normal call over one for tail call. */
2627 p->is_tail &= callee->is_tail;
2630 p->fun->start = NULL;
2631 p->fun->is_func = TRUE;
2633 p->count += callee->count;
2634 /* Reorder list so most recent call is first. */
2636 p->next = caller->call_list;
2637 caller->call_list = p;
2640 callee->next = caller->call_list;
2641 caller->call_list = callee;
/* Heap-duplicate CALL and hand the copy to insert_callee; if it was a
   duplicate, the copy is presumably freed on an elided line (per
   insert_callee's contract).  */
2645 /* Copy CALL and insert the copy into CALLER. */
2648 copy_callee (struct function_info *caller, const struct call_info *call)
2650 struct call_info *callee;
2651 callee = bfd_malloc (sizeof (*callee));
2655 if (!insert_callee (caller, callee))
/* True for real code sections: flags must be exactly ALLOC|LOAD|CODE
   within the tested mask, so SEC_IN_MEMORY (set on linker-created stub
   sections) disqualifies, as does an absolute output section.  */
2660 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2661 overlay stub sections. */
2664 interesting_section (asection *s)
2666 return (s->output_section != bfd_abs_section_ptr
2667 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2668 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
/* Scan SEC's relocs to discover the call graph.  Branch relocs
   (R_SPU_REL16/ADDR16) are decoded from the insn itself:
   (insn[0] & 0xfd) == 0x31 matches the br[a]sl call opcodes, and the
   hbr-priority bits are collected from insn bytes.  Non-branch relocs
   to code are treated as function-pointer references (counted toward
   non_ovly_stub for --auto-overlay when STT_FUNC).  When CALL_TREE,
   call_info records are built via insert_callee; plain branches within
   the same object are classified as either hot/cold "start" links to
   the same function or separate functions (tail calls).  Addend-carrying
   references get a fake local symbol so the target piece has a
   function_info of its own.  NOTE(review): many lines are elided here,
   so the full control flow is not visible in this extraction.  */
2672 /* Rummage through the relocs for SEC, looking for function calls.
2673 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2674 mark destination symbols on calls as being functions. Also
2675 look at branches, which may be tail calls or go to hot/cold
2676 section part of same function. */
2679 mark_functions_via_relocs (asection *sec,
2680 struct bfd_link_info *info,
2683 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2684 Elf_Internal_Shdr *symtab_hdr;
2686 unsigned int priority = 0;
2687 static bfd_boolean warned;
2689 if (!interesting_section (sec)
2690 || sec->reloc_count == 0)
2693 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2695 if (internal_relocs == NULL)
2698 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2699 psyms = &symtab_hdr->contents;
2700 irela = internal_relocs;
2701 irelaend = irela + sec->reloc_count;
2702 for (; irela < irelaend; irela++)
2704 enum elf_spu_reloc_type r_type;
2705 unsigned int r_indx;
2707 Elf_Internal_Sym *sym;
2708 struct elf_link_hash_entry *h;
2710 bfd_boolean nonbranch, is_call;
2711 struct function_info *caller;
2712 struct call_info *callee;
2714 r_type = ELF32_R_TYPE (irela->r_info);
2715 nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
2717 r_indx = ELF32_R_SYM (irela->r_info);
2718 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2722 || sym_sec->output_section == bfd_abs_section_ptr)
2728 unsigned char insn[4];
2730 if (!bfd_get_section_contents (sec->owner, sec, insn,
2731 irela->r_offset, 4))
2733 if (is_branch (insn))
2735 is_call = (insn[0] & 0xfd) == 0x31;
2736 priority = insn[1] & 0x0f;
2738 priority |= insn[2];
2740 priority |= insn[3];
2742 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2743 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2746 info->callbacks->einfo
2747 (_("%B(%A+0x%v): call to non-code section"
2748 " %B(%A), analysis incomplete\n"),
2749 sec->owner, sec, irela->r_offset,
2750 sym_sec->owner, sym_sec);
2765 /* For --auto-overlay, count possible stubs we need for
2766 function pointer references. */
2767 unsigned int sym_type;
2771 sym_type = ELF_ST_TYPE (sym->st_info);
2772 if (sym_type == STT_FUNC)
2774 if (call_tree && spu_hash_table (info)->params->auto_overlay)
2775 spu_hash_table (info)->non_ovly_stub += 1;
2776 /* If the symbol type is STT_FUNC then this must be a
2777 function pointer initialisation. */
2780 /* Ignore data references. */
2781 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2782 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2784 /* Otherwise we probably have a jump table reloc for
2785 a switch statement or some other reference to a
2790 val = h->root.u.def.value;
2792 val = sym->st_value;
2793 val += irela->r_addend;
2797 struct function_info *fun;
2799 if (irela->r_addend != 0)
2801 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2804 fake->st_value = val;
2806 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2810 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2812 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2815 if (irela->r_addend != 0
2816 && fun->u.sym != sym)
2821 caller = find_function (sec, irela->r_offset, info);
2824 callee = bfd_malloc (sizeof *callee);
2828 callee->fun = find_function (sym_sec, val, info);
2829 if (callee->fun == NULL)
2831 callee->is_tail = !is_call;
2832 callee->is_pasted = FALSE;
2833 callee->broken_cycle = FALSE;
2834 callee->priority = priority;
2835 callee->count = nonbranch? 0 : 1;
2836 if (callee->fun->last_caller != sec)
2838 callee->fun->last_caller = sec;
2839 callee->fun->call_count += 1;
2841 if (!insert_callee (caller, callee))
2844 && !callee->fun->is_func
2845 && callee->fun->stack == 0)
2847 /* This is either a tail call or a branch from one part of
2848 the function to another, ie. hot/cold section. If the
2849 destination has been called by some other function then
2850 it is a separate function. We also assume that functions
2851 are not split across input files. */
2852 if (sec->owner != sym_sec->owner)
2854 callee->fun->start = NULL;
2855 callee->fun->is_func = TRUE;
2857 else if (callee->fun->start == NULL)
2859 struct function_info *caller_start = caller;
2860 while (caller_start->start)
2861 caller_start = caller_start->start;
2863 if (caller_start != callee->fun)
2864 callee->fun->start = caller_start;
2868 struct function_info *callee_start;
2869 struct function_info *caller_start;
2870 callee_start = callee->fun;
2871 while (callee_start->start)
2872 callee_start = callee_start->start;
2873 caller_start = caller;
2874 while (caller_start->start)
2875 caller_start = caller_start->start;
2876 if (caller_start != callee_start)
2878 callee->fun->start = NULL;
2879 callee->fun->is_func = TRUE;
2888 /* Handle something like .init or .fini, which has a piece of a function.
2889 These sections are pasted together to form a single function. */
/* Record SEC as one piece of a pasted function: fabricate a symbol
   spanning the whole section, then link this piece to the function
   fragment immediately preceding it in the output section's link
   order.  NOTE(review): listing is elided; some original lines are
   missing between the numbered lines below.  */
2892 pasted_function (asection *sec)
2894 struct bfd_link_order *l;
2895 struct _spu_elf_section_data *sec_data;
2896 struct spu_elf_stack_info *sinfo;
2897 Elf_Internal_Sym *fake;
2898 struct function_info *fun, *fun_start;
/* Zero-initialised fake symbol covering the entire section.  */
2900 fake = bfd_zmalloc (sizeof (*fake));
2904 fake->st_size = sec->size;
2906 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2907 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2911 /* Find a function immediately preceding this section. */
2913 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2915 if (l->u.indirect.section == sec)
2917 if (fun_start != NULL)
/* Attach this piece to its predecessor with a pasted "call" edge.  */
2919 struct call_info *callee = bfd_malloc (sizeof *callee);
2923 fun->start = fun_start;
2925 callee->is_tail = TRUE;
2926 callee->is_pasted = TRUE;
2927 callee->broken_cycle = FALSE;
2928 callee->priority = 0;
2930 if (!insert_callee (fun_start, callee))
/* Remember the last function_info seen in each earlier section; it
   becomes the predecessor candidate for the next pasted piece.  */
2936 if (l->type == bfd_indirect_link_order
2937 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2938 && (sinfo = sec_data->u.i.stack_info) != NULL
2939 && sinfo->num_fun != 0)
2940 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2943 /* Don't return an error if we did not find a function preceding this
2944 section. The section may have incorrect flags. */
2948 /* Map address ranges in code sections to functions. */
/* Build per-section function tables for every SPU input bfd:
   read symbols, install properly typed function symbols, then try to
   fill gaps via relocations and global symbols, and finally extend
   zero-sized functions and handle symbol-less (pasted) sections.
   NOTE(review): listing is elided; error paths and some statements
   between the numbered lines are not visible here.  */
2951 discover_functions (struct bfd_link_info *info)
2955 Elf_Internal_Sym ***psym_arr;
2956 asection ***sec_arr;
2957 bfd_boolean gaps = FALSE;
/* Per-input-bfd arrays of sorted symbol pointers / their sections.  */
2960 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
2963 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2964 if (psym_arr == NULL)
2966 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2967 if (sec_arr == NULL)
2970 for (ibfd = info->input_bfds, bfd_idx = 0;
2972 ibfd = ibfd->link.next, bfd_idx++)
2974 extern const bfd_target spu_elf32_vec;
2975 Elf_Internal_Shdr *symtab_hdr;
2978 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2979 asection **psecs, **p;
/* Only SPU ELF input files participate.  */
2981 if (ibfd->xvec != &spu_elf32_vec)
2984 /* Read all the symbols. */
2985 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2986 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2990 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2991 if (interesting_section (sec))
2999 if (symtab_hdr->contents != NULL)
3001 /* Don't use cached symbols since the generic ELF linker
3002 code only reads local symbols, and we need globals too. */
3003 free (symtab_hdr->contents);
3004 symtab_hdr->contents = NULL;
3006 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
/* Cache the freshly read full symbol table for later passes.  */
3008 symtab_hdr->contents = (void *) syms;
3012 /* Select defined function symbols that are going to be output. */
3013 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
3016 psym_arr[bfd_idx] = psyms;
3017 psecs = bfd_malloc (symcount * sizeof (*psecs));
3020 sec_arr[bfd_idx] = psecs;
3021 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
3022 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
3023 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3027 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3028 if (s != NULL && interesting_section (s))
3031 symcount = psy - psyms;
3034 /* Sort them by section and offset within section. */
/* sort_syms reads these file-scope variables as implicit context.  */
3035 sort_syms_syms = syms;
3036 sort_syms_psecs = psecs;
3037 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3039 /* Now inspect the function symbols. */
3040 for (psy = psyms; psy < psyms + symcount; )
3042 asection *s = psecs[*psy - syms];
3043 Elf_Internal_Sym **psy2;
/* Find the run of sorted symbols belonging to section S.  */
3045 for (psy2 = psy; ++psy2 < psyms + symcount; )
3046 if (psecs[*psy2 - syms] != s)
3049 if (!alloc_stack_info (s, psy2 - psy))
3054 /* First install info about properly typed and sized functions.
3055 In an ideal world this will cover all code sections, except
3056 when partitioning functions into hot and cold sections,
3057 and the horrible pasted together .init and .fini functions. */
3058 for (psy = psyms; psy < psyms + symcount; ++psy)
3061 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3063 asection *s = psecs[sy - syms];
3064 if (!maybe_insert_function (s, sy, FALSE, TRUE))
3069 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3070 if (interesting_section (sec))
3071 gaps |= check_function_ranges (sec, info);
3076 /* See if we can discover more function symbols by looking at
/* Second pass: use branch relocations to find unlabelled code.  */
3078 for (ibfd = info->input_bfds, bfd_idx = 0;
3080 ibfd = ibfd->link.next, bfd_idx++)
3084 if (psym_arr[bfd_idx] == NULL)
3087 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3088 if (!mark_functions_via_relocs (sec, info, FALSE))
3092 for (ibfd = info->input_bfds, bfd_idx = 0;
3094 ibfd = ibfd->link.next, bfd_idx++)
3096 Elf_Internal_Shdr *symtab_hdr;
3098 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3101 if ((psyms = psym_arr[bfd_idx]) == NULL)
3104 psecs = sec_arr[bfd_idx];
3106 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3107 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3110 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3111 if (interesting_section (sec))
3112 gaps |= check_function_ranges (sec, info);
3116 /* Finally, install all globals. */
3117 for (psy = psyms; (sy = *psy) != NULL; ++psy)
3121 s = psecs[sy - syms];
3123 /* Global syms might be improperly typed functions. */
3124 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3125 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3127 if (!maybe_insert_function (s, sy, FALSE, FALSE))
3133 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3135 extern const bfd_target spu_elf32_vec;
3138 if (ibfd->xvec != &spu_elf32_vec)
3141 /* Some of the symbols we've installed as marking the
3142 beginning of functions may have a size of zero. Extend
3143 the range of such functions to the beginning of the
3144 next symbol of interest. */
3145 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3146 if (interesting_section (sec))
3148 struct _spu_elf_section_data *sec_data;
3149 struct spu_elf_stack_info *sinfo;
3151 sec_data = spu_elf_section_data (sec);
3152 sinfo = sec_data->u.i.stack_info;
3153 if (sinfo != NULL && sinfo->num_fun != 0)
/* Walk backwards so each function's hi bound is the next one's lo.  */
3156 bfd_vma hi = sec->size;
3158 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3160 sinfo->fun[fun_idx].hi = hi;
3161 hi = sinfo->fun[fun_idx].lo;
3164 sinfo->fun[0].lo = 0;
3166 /* No symbols in this section. Must be .init or .fini
3167 or something similar. */
3168 else if (!pasted_function (sec))
/* Release the per-bfd scratch arrays.  */
3174 for (ibfd = info->input_bfds, bfd_idx = 0;
3176 ibfd = ibfd->link.next, bfd_idx++)
3178 if (psym_arr[bfd_idx] == NULL)
3181 free (psym_arr[bfd_idx]);
3182 free (sec_arr[bfd_idx]);
3191 /* Iterate over all function_info we have collected, calling DOIT on
3192 each node if ROOT_ONLY is false. Only call DOIT on root nodes
/* Generic walker: DOIT(fun, info, param) is invoked for each
   function_info stored in the stack_info of every section of every
   SPU input bfd; a FALSE return from DOIT aborts the walk.  */
3196 for_each_node (bfd_boolean (*doit) (struct function_info *,
3197 struct bfd_link_info *,
3199 struct bfd_link_info *info,
3205 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3207 extern const bfd_target spu_elf32_vec;
/* Skip non-SPU input files.  */
3210 if (ibfd->xvec != &spu_elf32_vec)
3213 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3215 struct _spu_elf_section_data *sec_data;
3216 struct spu_elf_stack_info *sinfo;
3218 if ((sec_data = spu_elf_section_data (sec)) != NULL
3219 && (sinfo = sec_data->u.i.stack_info) != NULL)
3222 for (i = 0; i < sinfo->num_fun; ++i)
/* With ROOT_ONLY, visit only nodes not marked non_root.  */
3223 if (!root_only || !sinfo->fun[i].non_root)
3224 if (!doit (&sinfo->fun[i], info, param))
3232 /* Transfer call info attached to struct function_info entries for
3233 all of a given function's sections to the first entry. */
3236 transfer_calls (struct function_info *fun,
3237 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3238 void *param ATTRIBUTE_UNUSED)
3240 struct function_info *start = fun->start;
3244 struct call_info *call, *call_next;
/* Follow the start chain to the first piece of the split function.  */
3246 while (start->start != NULL)
3247 start = start->start;
3248 for (call = fun->call_list; call != NULL; call = call_next)
/* Save next before insert_callee, which relinks CALL.
   NOTE(review): insert_callee presumably merges duplicate edges —
   behavior defined elsewhere in this file; confirm there.  */
3250 call_next = call->next;
3251 if (!insert_callee (start, call))
3254 fun->call_list = NULL;
3259 /* Mark nodes in the call graph that are called by some other node. */
3262 mark_non_root (struct function_info *fun,
3263 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3264 void *param ATTRIBUTE_UNUSED)
3266 struct call_info *call;
/* Every callee of FUN is by definition not a call-graph root;
   recurse so the whole reachable subgraph is marked.  */
3271 for (call = fun->call_list; call; call = call->next)
3273 call->fun->non_root = TRUE;
3274 mark_non_root (call->fun, 0, 0);
3279 /* Remove cycles from the call graph. Set depth of nodes. */
/* PARAM points at the current depth on entry and receives the
   maximum depth reached below FUN on return.  */
3282 remove_cycles (struct function_info *fun,
3283 struct bfd_link_info *info,
3286 struct call_info **callp, *call;
3287 unsigned int depth = *(unsigned int *) param;
3288 unsigned int max_depth = depth;
/* FUN is on the current DFS path; a revisit means a cycle.  */
3292 fun->marking = TRUE;
3294 callp = &fun->call_list;
3295 while ((call = *callp) != NULL)
/* Pasted pieces don't add a level of call depth.  */
3297 call->max_depth = depth + !call->is_pasted;
3298 if (!call->fun->visit2)
3300 if (!remove_cycles (call->fun, info, &call->max_depth))
3302 if (max_depth < call->max_depth)
3303 max_depth = call->max_depth;
/* Back edge to a node on the current path: break the cycle.  */
3305 else if (call->fun->marking)
3307 struct spu_link_hash_table *htab = spu_hash_table (info);
3309 if (!htab->params->auto_overlay
3310 && htab->params->stack_analysis)
3312 const char *f1 = func_name (fun);
3313 const char *f2 = func_name (call->fun);
3315 info->callbacks->info (_("Stack analysis will ignore the call "
3320 call->broken_cycle = TRUE;
3322 callp = &call->next;
3324 fun->marking = FALSE;
3325 *(unsigned int *) param = max_depth;
3329 /* Check that we actually visited all nodes in remove_cycles. If we
3330 didn't, then there is some cycle in the call graph not attached to
3331 any root node. Arbitrarily choose a node in the cycle as a new
3332 root and break the cycle. */
3335 mark_detached_root (struct function_info *fun,
3336 struct bfd_link_info *info,
/* Promote FUN to a root and rerun cycle removal from it at depth 0;
   PARAM is the depth counter shared with remove_cycles.  */
3341 fun->non_root = FALSE;
3342 *(unsigned int *) param = 0;
3343 return remove_cycles (fun, info, param);
3346 /* Populate call_list for each function. */
/* Top-level driver: discover call edges from relocations, then
   normalise the graph (merge split-function call info, find roots,
   break cycles, attach detached cycles).  */
3349 build_call_tree (struct bfd_link_info *info)
3354 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3356 extern const bfd_target spu_elf32_vec;
/* Only SPU ELF input files contribute call edges.  */
3359 if (ibfd->xvec != &spu_elf32_vec)
3362 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3363 if (!mark_functions_via_relocs (sec, info, TRUE))
3367 /* Transfer call info from hot/cold section part of function
/* Skipped under --auto-overlay, which wants per-section call info.  */
3369 if (!spu_hash_table (info)->params->auto_overlay
3370 && !for_each_node (transfer_calls, info, 0, FALSE))
3373 /* Find the call graph root(s). */
3374 if (!for_each_node (mark_non_root, info, 0, FALSE))
3377 /* Remove cycles from the call graph. We start from the root node(s)
3378 so that we break cycles in a reasonable place. */
3380 if (!for_each_node (remove_cycles, info, &depth, TRUE))
3383 return for_each_node (mark_detached_root, info, &depth, FALSE);
3386 /* qsort predicate to sort calls by priority, max_depth then count. */
3389 sort_calls (const void *a, const void *b)
3391 struct call_info *const *c1 = a;
3392 struct call_info *const *c2 = b;
/* c2 - c1 ordering: higher priority, then deeper max_depth, then
   larger count sorts earlier.  */
3395 delta = (*c2)->priority - (*c1)->priority;
3399 delta = (*c2)->max_depth - (*c1)->max_depth;
3403 delta = (*c2)->count - (*c1)->count;
/* Final tie-break on array position keeps the order deterministic.  */
3407 return (char *) c1 - (char *) c2;
3411 unsigned int max_overlay_size;
3414 /* Set linker_mark and gc_mark on any sections that we will put in
3415 overlays. These flags are used by the generic ELF linker, but we
3416 won't be continuing on to bfd_elf_final_link so it is OK to use
3417 them. linker_mark is clear before we get here. Set segment_mark
3418 on sections that are part of a pasted function (excluding the last
3421 Set up function rodata section if --overlay-rodata. We don't
3422 currently include merged string constant rodata sections since
3424 Sort the call graph so that the deepest nodes will be visited
3428 mark_overlay_section (struct function_info *fun,
3429 struct bfd_link_info *info,
3432 struct call_info *call;
3434 struct _mos_param *mos_param = param;
3435 struct spu_link_hash_table *htab = spu_hash_table (info);
/* Soft-icache flavour only overlays sections explicitly named for
   it unless --non-ia-text was given (.init/.fini always qualify).  */
3441 if (!fun->sec->linker_mark
3442 && (htab->params->ovly_flavour != ovly_soft_icache
3443 || htab->params->non_ia_text
3444 || strncmp (fun->sec->name, ".text.ia.", 9) == 0
3445 || strcmp (fun->sec->name, ".init") == 0
3446 || strcmp (fun->sec->name, ".fini") == 0))
3450 fun->sec->linker_mark = 1;
3451 fun->sec->gc_mark = 1;
3452 fun->sec->segment_mark = 0;
3453 /* Ensure SEC_CODE is set on this text section (it ought to
3454 be!), and SEC_CODE is clear on rodata sections. We use
3455 this flag to differentiate the two overlay section types. */
3456 fun->sec->flags |= SEC_CODE;
3458 size = fun->sec->size;
3459 if (htab->params->auto_overlay & OVERLAY_RODATA)
3463 /* Find the rodata section corresponding to this function's
/* Derive the candidate rodata section name from the text name:
   .text -> .rodata, .text.foo -> .rodata.foo,
   .gnu.linkonce.t.foo -> .gnu.linkonce.r.foo (per patterns below).  */
3465 if (strcmp (fun->sec->name, ".text") == 0)
3467 name = bfd_malloc (sizeof (".rodata"));
3470 memcpy (name, ".rodata", sizeof (".rodata"));
3472 else if (strncmp (fun->sec->name, ".text.", 6) == 0)
3474 size_t len = strlen (fun->sec->name);
3475 name = bfd_malloc (len + 3);
3478 memcpy (name, ".rodata", sizeof (".rodata"));
3479 memcpy (name + 7, fun->sec->name + 5, len - 4);
3481 else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
3483 size_t len = strlen (fun->sec->name) + 1;
3484 name = bfd_malloc (len);
3487 memcpy (name, fun->sec->name, len);
/* Prefer a same-group member of the matching name; otherwise fall
   back to a plain name lookup in the owning bfd.  */
3493 asection *rodata = NULL;
3494 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3495 if (group_sec == NULL)
3496 rodata = bfd_get_section_by_name (fun->sec->owner, name);
3498 while (group_sec != NULL && group_sec != fun->sec)
3500 if (strcmp (group_sec->name, name) == 0)
3505 group_sec = elf_section_data (group_sec)->next_in_group;
3507 fun->rodata = rodata;
/* Include the rodata size unless it would overflow a cache line.  */
3510 size += fun->rodata->size;
3511 if (htab->params->line_size != 0
3512 && size > htab->params->line_size)
3514 size -= fun->rodata->size;
3519 fun->rodata->linker_mark = 1;
3520 fun->rodata->gc_mark = 1;
3521 fun->rodata->flags &= ~SEC_CODE;
3527 if (mos_param->max_overlay_size < size)
3528 mos_param->max_overlay_size = size;
/* Sort this function's call list (see sort_calls) via a scratch
   array, then rebuild the linked list in sorted order.  */
3531 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3536 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3540 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3541 calls[count++] = call;
3543 qsort (calls, count, sizeof (*calls), sort_calls);
3545 fun->call_list = NULL;
3549 calls[count]->next = fun->call_list;
3550 fun->call_list = calls[count];
3555 for (call = fun->call_list; call != NULL; call = call->next)
3557 if (call->is_pasted)
3559 /* There can only be one is_pasted call per function_info. */
3560 BFD_ASSERT (!fun->sec->segment_mark);
3561 fun->sec->segment_mark = 1;
3563 if (!call->broken_cycle
3564 && !mark_overlay_section (call->fun, info, param))
3568 /* Don't put entry code into an overlay. The overlay manager needs
3569 a stack! Also, don't mark .ovl.init as an overlay. */
3570 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3571 == info->output_bfd->start_address
3572 || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
3574 fun->sec->linker_mark = 0;
3575 if (fun->rodata != NULL)
3576 fun->rodata->linker_mark = 0;
3581 /* If non-zero then unmark functions called from those within sections
3582 that we need to unmark. Unfortunately this isn't reliable since the
3583 call graph cannot know the destination of function pointer calls. */
3584 #define RECURSE_UNMARK 0
3587 asection *exclude_input_section;
3588 asection *exclude_output_section;
3589 unsigned long clearing;
3592 /* Undo some of mark_overlay_section's work. */
3595 unmark_overlay_section (struct function_info *fun,
3596 struct bfd_link_info *info,
3599 struct call_info *call;
3600 struct _uos_param *uos_param = param;
3601 unsigned int excluded = 0;
/* Is FUN's section one the caller asked to keep out of overlays?  */
3609 if (fun->sec == uos_param->exclude_input_section
3610 || fun->sec->output_section == uos_param->exclude_output_section)
3614 uos_param->clearing += excluded;
/* With RECURSE_UNMARK, also unmark everything reachable from an
   excluded section; otherwise only the excluded section itself.  */
3616 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3618 fun->sec->linker_mark = 0;
3620 fun->rodata->linker_mark = 0;
3623 for (call = fun->call_list; call != NULL; call = call->next)
3624 if (!call->broken_cycle
3625 && !unmark_overlay_section (call->fun, info, param))
/* Restore the clearing depth on the way back up.  */
3629 uos_param->clearing -= excluded;
3634 unsigned int lib_size;
3635 asection **lib_sections;
3638 /* Add sections we have marked as belonging to overlays to an array
3639 for consideration as non-overlay sections. The array consist of
3640 pairs of sections, (text,rodata), for functions in the call graph. */
3643 collect_lib_sections (struct function_info *fun,
3644 struct bfd_link_info *info,
3647 struct _cl_param *lib_param = param;
3648 struct call_info *call;
/* Only overlay-marked, still-live, non-pasted sections qualify.  */
3655 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3658 size = fun->sec->size;
3660 size += fun->rodata->size;
3662 if (size <= lib_param->lib_size)
/* Record the (text, rodata-or-NULL) pair; clearing gc_mark makes
   sure each pair is collected only once.  */
3664 *lib_param->lib_sections++ = fun->sec;
3665 fun->sec->gc_mark = 0;
3666 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3668 *lib_param->lib_sections++ = fun->rodata;
3669 fun->rodata->gc_mark = 0;
3672 *lib_param->lib_sections++ = NULL;
3675 for (call = fun->call_list; call != NULL; call = call->next)
3676 if (!call->broken_cycle)
3677 collect_lib_sections (call->fun, info, param);
3682 /* qsort predicate to sort sections by call count. */
3685 sort_lib (const void *a, const void *b)
3687 asection *const *s1 = a;
3688 asection *const *s2 = b;
3689 struct _spu_elf_section_data *sec_data;
3690 struct spu_elf_stack_info *sinfo;
/* Sum call counts of all functions in each section: *s1's counts
   are subtracted, *s2's added, so more-called sections sort first.  */
3694 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3695 && (sinfo = sec_data->u.i.stack_info) != NULL)
3698 for (i = 0; i < sinfo->num_fun; ++i)
3699 delta -= sinfo->fun[i].call_count;
3702 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3703 && (sinfo = sec_data->u.i.stack_info) != NULL)
3706 for (i = 0; i < sinfo->num_fun; ++i)
3707 delta += sinfo->fun[i].call_count;
3716 /* Remove some sections from those marked to be in overlays. Choose
3717 those that are called from many places, likely library functions. */
/* Greedily move the most-called small sections out of overlays while
   LIB_SIZE bytes of non-overlay space remain; returns the space left,
   or (unsigned int) -1 on error.  DUMMY_CALLER accumulates the set of
   overlay call stubs the already-chosen sections would need.  */
3720 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3723 asection **lib_sections;
3724 unsigned int i, lib_count;
3725 struct _cl_param collect_lib_param;
3726 struct function_info dummy_caller;
3727 struct spu_link_hash_table *htab;
3729 memset (&dummy_caller, 0, sizeof (dummy_caller));
/* Count candidate sections to size the collection array.  */
3731 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3733 extern const bfd_target spu_elf32_vec;
3736 if (ibfd->xvec != &spu_elf32_vec)
3739 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3740 if (sec->linker_mark
3741 && sec->size < lib_size
3742 && (sec->flags & SEC_CODE) != 0)
3745 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3746 if (lib_sections == NULL)
3747 return (unsigned int) -1;
3748 collect_lib_param.lib_size = lib_size;
3749 collect_lib_param.lib_sections = lib_sections;
3750 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3752 return (unsigned int) -1;
3753 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3755 /* Sort sections so that those with the most calls are first. */
3757 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3759 htab = spu_hash_table (info);
3760 for (i = 0; i < lib_count; i++)
3762 unsigned int tmp, stub_size;
3764 struct _spu_elf_section_data *sec_data;
3765 struct spu_elf_stack_info *sinfo;
3767 sec = lib_sections[2 * i];
3768 /* If this section is OK, its size must be less than lib_size. */
3770 /* If it has a rodata section, then add that too. */
3771 if (lib_sections[2 * i + 1])
3772 tmp += lib_sections[2 * i + 1]->size;
3773 /* Add any new overlay call stubs needed by the section. */
3776 && (sec_data = spu_elf_section_data (sec)) != NULL
3777 && (sinfo = sec_data->u.i.stack_info) != NULL)
3780 struct call_info *call;
/* A stub is new only if DUMMY_CALLER doesn't already hold an edge
   to the same callee.  */
3782 for (k = 0; k < sinfo->num_fun; ++k)
3783 for (call = sinfo->fun[k].call_list; call; call = call->next)
3784 if (call->fun->sec->linker_mark)
3786 struct call_info *p;
3787 for (p = dummy_caller.call_list; p; p = p->next)
3788 if (p->fun == call->fun)
3791 stub_size += ovl_stub_size (htab->params);
3794 if (tmp + stub_size < lib_size)
3796 struct call_info **pp, *p;
3798 /* This section fits. Mark it as non-overlay. */
3799 lib_sections[2 * i]->linker_mark = 0;
3800 if (lib_sections[2 * i + 1])
3801 lib_sections[2 * i + 1]->linker_mark = 0;
3802 lib_size -= tmp + stub_size;
3803 /* Call stubs to the section we just added are no longer
/* Reclaim stub space for edges into now-non-overlay sections.  */
3805 pp = &dummy_caller.call_list;
3806 while ((p = *pp) != NULL)
3807 if (!p->fun->sec->linker_mark)
3809 lib_size += ovl_stub_size (htab->params);
3815 /* Add new call stubs to dummy_caller. */
3816 if ((sec_data = spu_elf_section_data (sec)) != NULL
3817 && (sinfo = sec_data->u.i.stack_info) != NULL)
3820 struct call_info *call;
3822 for (k = 0; k < sinfo->num_fun; ++k)
3823 for (call = sinfo->fun[k].call_list;
3826 if (call->fun->sec->linker_mark)
3828 struct call_info *callee;
3829 callee = bfd_malloc (sizeof (*callee));
3831 return (unsigned int) -1;
3833 if (!insert_callee (&dummy_caller, callee))
/* Release the temporary call list and restore gc_mark on all the
   collected sections before returning the remaining space.  */
3839 while (dummy_caller.call_list != NULL)
3841 struct call_info *call = dummy_caller.call_list;
3842 dummy_caller.call_list = call->next;
3845 for (i = 0; i < 2 * lib_count; i++)
3846 if (lib_sections[i])
3847 lib_sections[i]->gc_mark = 1;
3848 free (lib_sections);
3852 /* Build an array of overlay sections. The deepest node's section is
3853 added first, then its parent node's section, then everything called
3854 from the parent section. The idea being to group sections to
3855 minimise calls between different overlays. */
3858 collect_overlays (struct function_info *fun,
3859 struct bfd_link_info *info,
3862 struct call_info *call;
3863 bfd_boolean added_fun;
3864 asection ***ovly_sections = param;
/* Visit real (non-pasted) callees first so the deepest sections are
   emitted before their callers.  */
3870 for (call = fun->call_list; call != NULL; call = call->next)
3871 if (!call->is_pasted && !call->broken_cycle)
3873 if (!collect_overlays (call->fun, info, ovly_sections))
/* Emit this function's (text, rodata-or-NULL) pair once, clearing
   gc_mark so it isn't collected again.  */
3879 if (fun->sec->linker_mark && fun->sec->gc_mark)
3881 fun->sec->gc_mark = 0;
3882 *(*ovly_sections)++ = fun->sec;
3883 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3885 fun->rodata->gc_mark = 0;
3886 *(*ovly_sections)++ = fun->rodata;
3889 *(*ovly_sections)++ = NULL;
3892 /* Pasted sections must stay with the first section. We don't
3893 put pasted sections in the array, just the first section.
3894 Mark subsequent sections as already considered. */
3895 if (fun->sec->segment_mark)
3897 struct function_info *call_fun = fun;
3900 for (call = call_fun->call_list; call != NULL; call = call->next)
3901 if (call->is_pasted)
3903 call_fun = call->fun;
3904 call_fun->sec->gc_mark = 0;
3905 if (call_fun->rodata)
3906 call_fun->rodata->gc_mark = 0;
3912 while (call_fun->sec->segment_mark);
/* Then recurse into remaining callees (including pasted ones).  */
3916 for (call = fun->call_list; call != NULL; call = call->next)
3917 if (!call->broken_cycle
3918 && !collect_overlays (call->fun, info, ovly_sections))
/* If we added FUN, also collect its section-mates' call trees.  */
3923 struct _spu_elf_section_data *sec_data;
3924 struct spu_elf_stack_info *sinfo;
3926 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3927 && (sinfo = sec_data->u.i.stack_info) != NULL)
3930 for (i = 0; i < sinfo->num_fun; ++i)
3931 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3939 struct _sum_stack_param {
3941 size_t overall_stack;
3942 bfd_boolean emit_stack_syms;
3945 /* Descend the call graph for FUN, accumulating total stack required. */
/* On return sum_stack_param->cum_stack holds FUN's cumulative stack;
   overall_stack tracks the maximum over root functions.  Optionally
   emits __stack_* absolute symbols and stack-analysis reports.  */
3948 sum_stack (struct function_info *fun,
3949 struct bfd_link_info *info,
3952 struct call_info *call;
3953 struct function_info *max;
3954 size_t stack, cum_stack;
3956 bfd_boolean has_call;
3957 struct _sum_stack_param *sum_stack_param = param;
3958 struct spu_link_hash_table *htab;
3960 cum_stack = fun->stack;
3961 sum_stack_param->cum_stack = cum_stack;
/* Take the maximum over all unbroken call edges.  */
3967 for (call = fun->call_list; call; call = call->next)
3969 if (call->broken_cycle)
3971 if (!call->is_pasted)
3973 if (!sum_stack (call->fun, info, sum_stack_param))
3975 stack = sum_stack_param->cum_stack;
3976 /* Include caller stack for normal calls, don't do so for
3977 tail calls. fun->stack here is local stack usage for
3979 if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3980 stack += fun->stack;
3981 if (cum_stack < stack)
3988 sum_stack_param->cum_stack = cum_stack;
3990 /* Now fun->stack holds cumulative stack. */
3991 fun->stack = cum_stack;
3995 && sum_stack_param->overall_stack < cum_stack)
3996 sum_stack_param->overall_stack = cum_stack;
3998 htab = spu_hash_table (info);
/* Reporting is suppressed under --auto-overlay.  */
3999 if (htab->params->auto_overlay)
4002 f1 = func_name (fun);
4003 if (htab->params->stack_analysis)
4006 info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
4007 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
4008 f1, (bfd_vma) stack, (bfd_vma) cum_stack);
4012 info->callbacks->minfo (_("  calls:\n"));
4013 for (call = fun->call_list; call; call = call->next)
4014 if (!call->is_pasted && !call->broken_cycle)
4016 const char *f2 = func_name (call->fun);
/* "*" marks the call on the deepest path, "t" a tail call.  */
4017 const char *ann1 = call->fun == max ? "*" : " ";
4018 const char *ann2 = call->is_tail ? "t" : " ";
4020 info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
/* Define an absolute __stack_<fun> (or __stack_<secid>_<fun> for
   locals) symbol whose value is the cumulative stack size.  */
4025 if (sum_stack_param->emit_stack_syms)
4027 char *name = bfd_malloc (18 + strlen (f1));
4028 struct elf_link_hash_entry *h;
4033 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
4034 sprintf (name, "__stack_%s", f1);
4036 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
4038 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
4041 && (h->root.type == bfd_link_hash_new
4042 || h->root.type == bfd_link_hash_undefined
4043 || h->root.type == bfd_link_hash_undefweak))
4045 h->root.type = bfd_link_hash_defined;
4046 h->root.u.def.section = bfd_abs_section_ptr;
4047 h->root.u.def.value = cum_stack;
4052 h->ref_regular_nonweak = 1;
4053 h->forced_local = 1;
4061 /* SEC is part of a pasted function. Return the call_info for the
4062 next section of this function. */
4064 static struct call_info *
4065 find_pasted_call (asection *sec)
4067 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4068 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4069 struct call_info *call;
/* Scan every function in SEC for its (unique) is_pasted call edge;
   mark_overlay_section asserts there is at most one per function.  */
4072 for (k = 0; k < sinfo->num_fun; ++k)
4073 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4074 if (call->is_pasted)
4080 /* qsort predicate to sort bfds by file name. */
4083 sort_bfds (const void *a, const void *b)
4085 bfd *const *abfd1 = a;
4086 bfd *const *abfd2 = b;
/* filename_cmp is libiberty's host-aware file name comparison.  */
4088 return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
/* Write the input-section lines for overlay OVLYNUM of the generated
   overlay linker script: first the text sections mapped to this
   overlay (OVLY_MAP[j] == OVLYNUM for j starting at BASE), then their
   rodata partners.  OVLY_SECTIONS holds (text, rodata) pairs.  */
4092 print_one_overlay_section (FILE *script,
4095 unsigned int ovlynum,
4096 unsigned int *ovly_map,
4097 asection **ovly_sections,
4098 struct bfd_link_info *info)
/* First pass: text sections.  */
4102 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4104 asection *sec = ovly_sections[2 * j];
4106 if (fprintf (script, "   %s%c%s (%s)\n",
4107 (sec->owner->my_archive != NULL
4108 ? sec->owner->my_archive->filename : ""),
4109 info->path_separator,
4110 sec->owner->filename,
/* Pasted functions: follow the chain so every pasted piece is
   placed in the same overlay as the first piece.  */
4113 if (sec->segment_mark)
4115 struct call_info *call = find_pasted_call (sec);
4116 while (call != NULL)
4118 struct function_info *call_fun = call->fun;
4119 sec = call_fun->sec;
4120 if (fprintf (script, "   %s%c%s (%s)\n",
4121 (sec->owner->my_archive != NULL
4122 ? sec->owner->my_archive->filename : ""),
4123 info->path_separator,
4124 sec->owner->filename,
4127 for (call = call_fun->call_list; call; call = call->next)
4128 if (call->is_pasted)
/* Second pass: the matching rodata sections (may be NULL).  */
4134 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4136 asection *sec = ovly_sections[2 * j + 1];
4138 && fprintf (script, "   %s%c%s (%s)\n",
4139 (sec->owner->my_archive != NULL
4140 ? sec->owner->my_archive->filename : ""),
4141 info->path_separator,
4142 sec->owner->filename,
4146 sec = ovly_sections[2 * j];
4147 if (sec->segment_mark)
4149 struct call_info *call = find_pasted_call (sec);
4150 while (call != NULL)
4152 struct function_info *call_fun = call->fun;
4153 sec = call_fun->rodata;
4155 && fprintf (script, "   %s%c%s (%s)\n",
4156 (sec->owner->my_archive != NULL
4157 ? sec->owner->my_archive->filename : ""),
4158 info->path_separator,
4159 sec->owner->filename,
4162 for (call = call_fun->call_list; call; call = call->next)
4163 if (call->is_pasted)
4172 /* Handle --auto-overlay. */
4175 spu_elf_auto_overlay (struct bfd_link_info *info)
4179 struct elf_segment_map *m;
4180 unsigned int fixed_size, lo, hi;
4181 unsigned int reserved;
4182 struct spu_link_hash_table *htab;
4183 unsigned int base, i, count, bfd_count;
4184 unsigned int region, ovlynum;
4185 asection **ovly_sections, **ovly_p;
4186 unsigned int *ovly_map;
4188 unsigned int total_overlay_size, overlay_size;
4189 const char *ovly_mgr_entry;
4190 struct elf_link_hash_entry *h;
4191 struct _mos_param mos_param;
4192 struct _uos_param uos_param;
4193 struct function_info dummy_caller;
4195 /* Find the extents of our loadable image. */
4196 lo = (unsigned int) -1;
4198 for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
4199 if (m->p_type == PT_LOAD)
4200 for (i = 0; i < m->count; i++)
4201 if (m->sections[i]->size != 0)
4203 if (m->sections[i]->vma < lo)
4204 lo = m->sections[i]->vma;
4205 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4206 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4208 fixed_size = hi + 1 - lo;
4210 if (!discover_functions (info))
4213 if (!build_call_tree (info))
4216 htab = spu_hash_table (info);
4217 reserved = htab->params->auto_overlay_reserved;
4220 struct _sum_stack_param sum_stack_param;
4222 sum_stack_param.emit_stack_syms = 0;
4223 sum_stack_param.overall_stack = 0;
4224 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4226 reserved = (sum_stack_param.overall_stack
4227 + htab->params->extra_stack_space);
4230 /* No need for overlays if everything already fits. */
4231 if (fixed_size + reserved <= htab->local_store
4232 && htab->params->ovly_flavour != ovly_soft_icache)
4234 htab->params->auto_overlay = 0;
4238 uos_param.exclude_input_section = 0;
4239 uos_param.exclude_output_section
4240 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4242 ovly_mgr_entry = "__ovly_load";
4243 if (htab->params->ovly_flavour == ovly_soft_icache)
4244 ovly_mgr_entry = "__icache_br_handler";
4245 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4246 FALSE, FALSE, FALSE);
4248 && (h->root.type == bfd_link_hash_defined
4249 || h->root.type == bfd_link_hash_defweak)
4252 /* We have a user supplied overlay manager. */
4253 uos_param.exclude_input_section = h->root.u.def.section;
4257 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4258 builtin version to .text, and will adjust .text size. */
4259 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4262 /* Mark overlay sections, and find max overlay section size. */
4263 mos_param.max_overlay_size = 0;
4264 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4267 /* We can't put the overlay manager or interrupt routines in
4269 uos_param.clearing = 0;
4270 if ((uos_param.exclude_input_section
4271 || uos_param.exclude_output_section)
4272 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4276 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4278 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4279 if (bfd_arr == NULL)
4282 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4285 total_overlay_size = 0;
4286 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4288 extern const bfd_target spu_elf32_vec;
4290 unsigned int old_count;
4292 if (ibfd->xvec != &spu_elf32_vec)
4296 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4297 if (sec->linker_mark)
4299 if ((sec->flags & SEC_CODE) != 0)
4301 fixed_size -= sec->size;
4302 total_overlay_size += sec->size;
4304 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4305 && sec->output_section->owner == info->output_bfd
4306 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4307 fixed_size -= sec->size;
4308 if (count != old_count)
4309 bfd_arr[bfd_count++] = ibfd;
4312 /* Since the overlay link script selects sections by file name and
4313 section name, ensure that file names are unique. */
4316 bfd_boolean ok = TRUE;
4318 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4319 for (i = 1; i < bfd_count; ++i)
4320 if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4322 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4324 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4325 info->callbacks->einfo (_("%s duplicated in %s\n"),
4326 bfd_arr[i]->filename,
4327 bfd_arr[i]->my_archive->filename);
4329 info->callbacks->einfo (_("%s duplicated\n"),
4330 bfd_arr[i]->filename);
4336 info->callbacks->einfo (_("sorry, no support for duplicate "
4337 "object files in auto-overlay script\n"));
4338 bfd_set_error (bfd_error_bad_value);
4344 fixed_size += reserved;
4345 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4346 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4348 if (htab->params->ovly_flavour == ovly_soft_icache)
4350 /* Stubs in the non-icache area are bigger. */
4351 fixed_size += htab->non_ovly_stub * 16;
4352 /* Space for icache manager tables.
4353 a) Tag array, one quadword per cache line.
4354 - word 0: ia address of present line, init to zero. */
4355 fixed_size += 16 << htab->num_lines_log2;
4356 /* b) Rewrite "to" list, one quadword per cache line. */
4357 fixed_size += 16 << htab->num_lines_log2;
4358 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4359 to a power-of-two number of full quadwords) per cache line. */
4360 fixed_size += 16 << (htab->fromelem_size_log2
4361 + htab->num_lines_log2);
4362 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4367 /* Guess number of overlays. Assuming overlay buffer is on
4368 average only half full should be conservative. */
4369 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4370 / (htab->local_store - fixed_size));
4371 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4372 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4376 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4377 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4378 "size of 0x%v exceeds local store\n"),
4379 (bfd_vma) fixed_size,
4380 (bfd_vma) mos_param.max_overlay_size);
4382 /* Now see if we should put some functions in the non-overlay area. */
4383 else if (fixed_size < htab->params->auto_overlay_fixed)
4385 unsigned int max_fixed, lib_size;
4387 max_fixed = htab->local_store - mos_param.max_overlay_size;
4388 if (max_fixed > htab->params->auto_overlay_fixed)
4389 max_fixed = htab->params->auto_overlay_fixed;
4390 lib_size = max_fixed - fixed_size;
4391 lib_size = auto_ovl_lib_functions (info, lib_size);
4392 if (lib_size == (unsigned int) -1)
4394 fixed_size = max_fixed - lib_size;
4397 /* Build an array of sections, suitably sorted to place into
4399 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4400 if (ovly_sections == NULL)
4402 ovly_p = ovly_sections;
4403 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4405 count = (size_t) (ovly_p - ovly_sections) / 2;
4406 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4407 if (ovly_map == NULL)
4410 memset (&dummy_caller, 0, sizeof (dummy_caller));
4411 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4412 if (htab->params->line_size != 0)
4413 overlay_size = htab->params->line_size;
4416 while (base < count)
4418 unsigned int size = 0, rosize = 0, roalign = 0;
4420 for (i = base; i < count; i++)
4422 asection *sec, *rosec;
4423 unsigned int tmp, rotmp;
4424 unsigned int num_stubs;
4425 struct call_info *call, *pasty;
4426 struct _spu_elf_section_data *sec_data;
4427 struct spu_elf_stack_info *sinfo;
4430 /* See whether we can add this section to the current
4431 overlay without overflowing our overlay buffer. */
4432 sec = ovly_sections[2 * i];
4433 tmp = align_power (size, sec->alignment_power) + sec->size;
4435 rosec = ovly_sections[2 * i + 1];
4438 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4439 if (roalign < rosec->alignment_power)
4440 roalign = rosec->alignment_power;
4442 if (align_power (tmp, roalign) + rotmp > overlay_size)
4444 if (sec->segment_mark)
4446 /* Pasted sections must stay together, so add their
4448 pasty = find_pasted_call (sec);
4449 while (pasty != NULL)
4451 struct function_info *call_fun = pasty->fun;
4452 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4453 + call_fun->sec->size);
4454 if (call_fun->rodata)
4456 rotmp = (align_power (rotmp,
4457 call_fun->rodata->alignment_power)
4458 + call_fun->rodata->size);
4459 if (roalign < rosec->alignment_power)
4460 roalign = rosec->alignment_power;
4462 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4463 if (pasty->is_pasted)
4467 if (align_power (tmp, roalign) + rotmp > overlay_size)
4470 /* If we add this section, we might need new overlay call
4471 stubs. Add any overlay section calls to dummy_call. */
4473 sec_data = spu_elf_section_data (sec);
4474 sinfo = sec_data->u.i.stack_info;
4475 for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4476 for (call = sinfo->fun[k].call_list; call; call = call->next)
4477 if (call->is_pasted)
4479 BFD_ASSERT (pasty == NULL);
4482 else if (call->fun->sec->linker_mark)
4484 if (!copy_callee (&dummy_caller, call))
4487 while (pasty != NULL)
4489 struct function_info *call_fun = pasty->fun;
4491 for (call = call_fun->call_list; call; call = call->next)
4492 if (call->is_pasted)
4494 BFD_ASSERT (pasty == NULL);
4497 else if (!copy_callee (&dummy_caller, call))
4501 /* Calculate call stub size. */
4503 for (call = dummy_caller.call_list; call; call = call->next)
4505 unsigned int stub_delta = 1;
4507 if (htab->params->ovly_flavour == ovly_soft_icache)
4508 stub_delta = call->count;
4509 num_stubs += stub_delta;
4511 /* If the call is within this overlay, we won't need a
4513 for (k = base; k < i + 1; k++)
4514 if (call->fun->sec == ovly_sections[2 * k])
4516 num_stubs -= stub_delta;
4520 if (htab->params->ovly_flavour == ovly_soft_icache
4521 && num_stubs > htab->params->max_branch)
4523 if (align_power (tmp, roalign) + rotmp
4524 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4532 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4533 ovly_sections[2 * i]->owner,
4534 ovly_sections[2 * i],
4535 ovly_sections[2 * i + 1] ? " + rodata" : "");
4536 bfd_set_error (bfd_error_bad_value);
4540 while (dummy_caller.call_list != NULL)
4542 struct call_info *call = dummy_caller.call_list;
4543 dummy_caller.call_list = call->next;
4549 ovly_map[base++] = ovlynum;
4552 script = htab->params->spu_elf_open_overlay_script ();
4554 if (htab->params->ovly_flavour == ovly_soft_icache)
4556 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4559 if (fprintf (script,
4560 " . = ALIGN (%u);\n"
4561 " .ovl.init : { *(.ovl.init) }\n"
4562 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4563 htab->params->line_size) <= 0)
4568 while (base < count)
4570 unsigned int indx = ovlynum - 1;
4571 unsigned int vma, lma;
4573 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4574 lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4576 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4577 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4578 ovlynum, vma, lma) <= 0)
4581 base = print_one_overlay_section (script, base, count, ovlynum,
4582 ovly_map, ovly_sections, info);
4583 if (base == (unsigned) -1)
4586 if (fprintf (script, " }\n") <= 0)
4592 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4593 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4596 if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4601 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4604 if (fprintf (script,
4605 " . = ALIGN (16);\n"
4606 " .ovl.init : { *(.ovl.init) }\n"
4607 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4610 for (region = 1; region <= htab->params->num_lines; region++)
4614 while (base < count && ovly_map[base] < ovlynum)
4622 /* We need to set lma since we are overlaying .ovl.init. */
4623 if (fprintf (script,
4624 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4629 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4633 while (base < count)
4635 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4638 base = print_one_overlay_section (script, base, count, ovlynum,
4639 ovly_map, ovly_sections, info);
4640 if (base == (unsigned) -1)
4643 if (fprintf (script, " }\n") <= 0)
4646 ovlynum += htab->params->num_lines;
4647 while (base < count && ovly_map[base] < ovlynum)
4651 if (fprintf (script, " }\n") <= 0)
4655 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4660 free (ovly_sections);
4662 if (fclose (script) != 0)
4665 if (htab->params->auto_overlay & AUTO_RELINK)
4666 (*htab->params->spu_elf_relink) ();
4671 bfd_set_error (bfd_error_system_call);
4673 info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
4677 /* Provide an estimate of total stack required.  */
/* Builds the call graph (discover_functions + build_call_tree), then
   sums worst-case stack usage over all call-graph root nodes via
   sum_stack/for_each_node and reports the maximum through the linker
   callbacks.  NOTE(review): error-return paths and braces between the
   visible statements are elided from this view.  */
4680 spu_elf_stack_analysis (struct bfd_link_info *info)
4682 struct spu_link_hash_table *htab;
4683 struct _sum_stack_param sum_stack_param;
  /* The stack analysis needs per-function extents and the call tree;
     each of these can fail (failure paths elided here).  */
4685 if (!discover_functions (info))
4688 if (!build_call_tree (info))
4691 htab = spu_hash_table (info);
4692 if (htab->params->stack_analysis)
4694 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4695 info->callbacks->minfo (_("\nStack size for functions.  "
4696 "Annotations: '*' max stack, 't' tail call\n"));
  /* Accumulate the deepest stack over every root node.  emit_stack_syms
     optionally emits __stack_* symbols as a side effect.  */
4699 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4700 sum_stack_param.overall_stack = 0;
4701 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4704 if (htab->params->stack_analysis)
4705 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4706 (bfd_vma) sum_stack_param.overall_stack);
4710 /* Perform a final link.  */
/* Backend bfd_final_link hook: optionally run automatic overlay
   generation and stack/lrlive analysis, build overlay call stubs, then
   hand off to the generic ELF final link.  NOTE(review): the return
   type line and some braces are elided from this view.  */
4713 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4715 struct spu_link_hash_table *htab = spu_hash_table (info);
4717 if (htab->params->auto_overlay)
4718 spu_elf_auto_overlay (info);
  /* Soft-icache needs the lrlive analysis even when the user did not
     ask for a stack report.  */
4720 if ((htab->params->stack_analysis
4721 || (htab->params->ovly_flavour == ovly_soft_icache
4722 && htab->params->lrlive_analysis))
4723 && !spu_elf_stack_analysis (info))
4724 info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
  /* Stub build failure is fatal (%F aborts the link).  */
4726 if (!spu_elf_build_stubs (info))
4727 info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4729 return bfd_elf_final_link (output_bfd, info);
4732 /* Called when not normally emitting relocs, ie. !info->relocatable
4733    and !info->emitrelocations.  Returns a count of special relocs
4734    that need to be emitted.  */
/* Counts R_SPU_PPU32/R_SPU_PPU64 relocs in SEC: these describe
   addresses seen from the PPU side and must survive into the output
   even in a normal (final) link.  NOTE(review): the count increment,
   reloc-buffer free and return are elided from this view.  */
4737 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4739 Elf_Internal_Rela *relocs;
4740 unsigned int count = 0;
4742 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4746 Elf_Internal_Rela *rel;
4747 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4749 for (rel = relocs; rel < relend; rel++)
4751 int r_type = ELF32_R_TYPE (rel->r_info);
4752 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
  /* Only free the buffer if it wasn't cached on the section.  */
4756 if (elf_section_data (sec)->relocs != relocs)
4763 /* Functions for adding fixup records to .fixup */
/* Each fixup record is one 32-bit word: the quadword-aligned address
   in the upper 28 bits, plus a 4-bit mask of which words within that
   quadword carry an R_SPU_ADDR32 reloc (see spu_elf_emit_fixup).  */
4765 #define FIXUP_RECORD_SIZE 4
/* Store/load record INDEX in the .fixup section contents.  */
4767 #define FIXUP_PUT(output_bfd,htab,index,addr) \
4768 bfd_put_32 (output_bfd, addr, \
4769 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4770 #define FIXUP_GET(output_bfd,htab,index) \
4771 bfd_get_32 (output_bfd, \
4772 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4774 /* Store OFFSET in .fixup.  This assumes it will be called with an
4775    increasing OFFSET.  When this OFFSET fits with the last base offset,
4776    it just sets a bit, otherwise it adds a new fixup record.  */
4778 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4781 struct spu_link_hash_table *htab = spu_hash_table (info);
4782 asection *sfixup = htab->sfixup;
  /* qaddr is OFFSET rounded down to its 16-byte quadword; bit selects
     one of the four words in that quadword (bit 3 = word 0 ... bit 0 =
     word 3).  */
4783 bfd_vma qaddr = offset & ~(bfd_vma) 15;
4784 bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4785 if (sfixup->reloc_count == 0)
  /* First record: nothing to merge with.  */
4787 FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4788 sfixup->reloc_count++;
4792 bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4793 if (qaddr != (base & ~(bfd_vma) 15))
  /* New quadword: append a fresh record.  The section was sized in
     spu_elf_size_sections; overflowing it indicates an internal
     inconsistency, hence the fatal error.  */
4795 if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4796 (*_bfd_error_handler) (_("fatal error while creating .fixup"));
4797 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4798 sfixup->reloc_count++;
  /* Same quadword as the previous record: just OR in the word bit.  */
4801 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4805 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */
/* Backend relocate_section hook.  Besides the generic work of
   resolving each reloc, this handles SPU specifics: redirecting calls
   into overlays through stubs, encoding soft-icache set ids into
   addresses, emitting .fixup records for R_SPU_ADDR32, and converting
   R_SPU_PPU32/PPU64 relocs against ._ea symbols into image-relative
   relocs that are passed through to the output.  NOTE(review): many
   intermediate lines (braces, continues, error paths, the final
   return) are elided from this view.  */
4808 spu_elf_relocate_section (bfd *output_bfd,
4809 struct bfd_link_info *info,
4811 asection *input_section,
4813 Elf_Internal_Rela *relocs,
4814 Elf_Internal_Sym *local_syms,
4815 asection **local_sections)
4817 Elf_Internal_Shdr *symtab_hdr;
4818 struct elf_link_hash_entry **sym_hashes;
4819 Elf_Internal_Rela *rel, *relend;
4820 struct spu_link_hash_table *htab;
4823 bfd_boolean emit_these_relocs = FALSE;
4824 bfd_boolean is_ea_sym;
4826 unsigned int iovl = 0;
4828 htab = spu_hash_table (info);
  /* Only look for stub redirections if stubs exist and this section
     can contain branches needing them.  */
4829 stubs = (htab->stub_sec != NULL
4830 && maybe_needs_stubs (input_section));
4831 iovl = overlay_index (input_section);
4832 ea = bfd_get_section_by_name (output_bfd, "._ea");
4833 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4834 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4837 relend = relocs + input_section->reloc_count;
4838 for (; rel < relend; rel++)
4841 reloc_howto_type *howto;
4842 unsigned int r_symndx;
4843 Elf_Internal_Sym *sym;
4845 struct elf_link_hash_entry *h;
4846 const char *sym_name;
4849 bfd_reloc_status_type r;
4850 bfd_boolean unresolved_reloc;
4851 enum _stub_type stub_type;
4853 r_symndx = ELF32_R_SYM (rel->r_info);
4854 r_type = ELF32_R_TYPE (rel->r_info);
4855 howto = elf_howto_table + r_type;
4856 unresolved_reloc = FALSE;
  /* Resolve the symbol: local symbols directly, globals through the
     hash table (following indirect/warning links).  */
4860 if (r_symndx < symtab_hdr->sh_info)
4862 sym = local_syms + r_symndx;
4863 sec = local_sections[r_symndx];
4864 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4865 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4869 if (sym_hashes == NULL)
4872 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4874 if (info->wrap_hash != NULL
4875 && (input_section->flags & SEC_DEBUGGING) != 0)
4876 h = ((struct elf_link_hash_entry *)
4877 unwrap_hash_lookup (info, input_bfd, &h->root));
4879 while (h->root.type == bfd_link_hash_indirect
4880 || h->root.type == bfd_link_hash_warning)
4881 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4884 if (h->root.type == bfd_link_hash_defined
4885 || h->root.type == bfd_link_hash_defweak)
4887 sec = h->root.u.def.section;
4889 || sec->output_section == NULL)
4890 /* Set a flag that will be cleared later if we find a
4891    relocation value for this symbol.  output_section
4892    is typically NULL for symbols satisfied by a shared
4894 unresolved_reloc = TRUE;
4896 relocation = (h->root.u.def.value
4897 + sec->output_section->vma
4898 + sec->output_offset);
4900 else if (h->root.type == bfd_link_hash_undefweak)
4902 else if (info->unresolved_syms_in_objects == RM_IGNORE
4903 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
  /* PPU relocs may legitimately reference symbols undefined on the
     SPU side, so they are excluded from the undefined-symbol error.  */
4905 else if (!info->relocatable
4906 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4909 err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4910 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4911 if (!info->callbacks->undefined_symbol (info,
4912 h->root.root.string,
4915 rel->r_offset, err))
4918 sym_name = h->root.root.string;
4921 if (sec != NULL && discarded_section (sec))
4922 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4923 rel, 1, relend, howto, 0, contents);
4925 if (info->relocatable)
4928 /* Change "a rt,ra,rb" to "ai rt,ra,0".  */
4929 if (r_type == R_SPU_ADD_PIC
4931 && !(h->def_regular || ELF_COMMON_DEF_P (h)))
4933 bfd_byte *loc = contents + rel->r_offset;
4939 is_ea_sym = (ea != NULL
4941 && sec->output_section == ea);
4943 /* If this symbol is in an overlay area, we may need to relocate
4944    to the overlay stub.  */
4945 addend = rel->r_addend;
4948 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4949 contents, info)) != no_stub)
4951 unsigned int ovl = 0;
4952 struct got_entry *g, **head;
4954 if (stub_type != nonovl_stub)
4958 head = &h->got.glist;
4960 head = elf_local_got_ents (input_bfd) + r_symndx;
  /* Find the got_entry matching this call site (soft-icache keys on
     branch address, normal overlays on addend+overlay).  */
4962 for (g = *head; g != NULL; g = g->next)
4963 if (htab->params->ovly_flavour == ovly_soft_icache
4965 && g->br_addr == (rel->r_offset
4966 + input_section->output_offset
4967 + input_section->output_section->vma))
4968 : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4973 relocation = g->stub_addr;
4978 /* For soft icache, encode the overlay index into addresses.  */
4979 if (htab->params->ovly_flavour == ovly_soft_icache
4980 && (r_type == R_SPU_ADDR16_HI
4981 || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
4984 unsigned int ovl = overlay_index (sec);
4987 unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
4988 relocation += set_id << 18;
  /* Record runtime-relocatable words so the loader can fix them up.  */
4993 if (htab->params->emit_fixups && !info->relocatable
4994 && (input_section->flags & SEC_ALLOC) != 0
4995 && r_type == R_SPU_ADDR32)
4998 offset = rel->r_offset + input_section->output_section->vma
4999 + input_section->output_offset;
5000 spu_elf_emit_fixup (output_bfd, info, offset);
5003 if (unresolved_reloc)
5005 else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5009 /* ._ea is a special section that isn't allocated in SPU
5010    memory, but rather occupies space in PPU memory as
5011    part of an embedded ELF image.  If this reloc is
5012    against a symbol defined in ._ea, then transform the
5013    reloc into an equivalent one without a symbol
5014    relative to the start of the ELF image.  */
5015 rel->r_addend += (relocation
5017 + elf_section_data (ea)->this_hdr.sh_offset);
5018 rel->r_info = ELF32_R_INFO (0, r_type);
5020 emit_these_relocs = TRUE;
5024 unresolved_reloc = TRUE;
5026 if (unresolved_reloc
5027 && _bfd_elf_section_offset (output_bfd, info, input_section,
5028 rel->r_offset) != (bfd_vma) -1)
5030 (*_bfd_error_handler)
5031 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
5033 bfd_get_section_name (input_bfd, input_section),
5034 (long) rel->r_offset,
5040 r = _bfd_final_link_relocate (howto,
5044 rel->r_offset, relocation, addend);
5046 if (r != bfd_reloc_ok)
5048 const char *msg = (const char *) 0;
5052 case bfd_reloc_overflow:
5053 if (!((*info->callbacks->reloc_overflow)
5054 (info, (h ? &h->root : NULL), sym_name, howto->name,
5055 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
5059 case bfd_reloc_undefined:
5060 if (!((*info->callbacks->undefined_symbol)
5061 (info, sym_name, input_bfd, input_section,
5062 rel->r_offset, TRUE)))
5066 case bfd_reloc_outofrange:
5067 msg = _("internal error: out of range error");
5070 case bfd_reloc_notsupported:
5071 msg = _("internal error: unsupported relocation error");
5074 case bfd_reloc_dangerous:
5075 msg = _("internal error: dangerous error");
5079 msg = _("internal error: unknown error");
5084 if (!((*info->callbacks->warning)
5085 (info, msg, sym_name, input_bfd, input_section,
  /* When we converted PPU relocs above and relocs aren't normally
     emitted, compact the reloc array down to just those and adjust
     the section's reloc bookkeeping to match.  */
5094 && emit_these_relocs
5095 && !info->emitrelocations)
5097 Elf_Internal_Rela *wrel;
5098 Elf_Internal_Shdr *rel_hdr;
5100 wrel = rel = relocs;
5101 relend = relocs + input_section->reloc_count;
5102 for (; rel < relend; rel++)
5106 r_type = ELF32_R_TYPE (rel->r_info);
5107 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5110 input_section->reloc_count = wrel - relocs;
5111 /* Backflips for _bfd_elf_link_output_relocs.  */
5112 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5113 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
/* SPU has no dynamic sections to finish; this backend hook is a stub
   (body elided in this view -- presumably just returns TRUE, verify
   against the full source).  */
5121 spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
5122 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5127 /* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
/* Output-symbol hook: for defined _SPUEAR_* entry symbols, rewrite the
   emitted symbol's section and value to the non-overlay stub created
   for it, so PPU-side code always reaches a callable address.
   NOTE(review): intervening braces/returns are elided from this
   view.  */
5130 spu_elf_output_symbol_hook (struct bfd_link_info *info,
5131 const char *sym_name ATTRIBUTE_UNUSED,
5132 Elf_Internal_Sym *sym,
5133 asection *sym_sec ATTRIBUTE_UNUSED,
5134 struct elf_link_hash_entry *h)
5136 struct spu_link_hash_table *htab = spu_hash_table (info);
5138 if (!info->relocatable
5139 && htab->stub_sec != NULL
5141 && (h->root.type == bfd_link_hash_defined
5142 || h->root.type == bfd_link_hash_defweak)
5144 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5146 struct got_entry *g;
  /* Find the non-overlay stub entry: soft-icache marks it with
     br_addr == stub_addr, normal overlays with addend 0, ovl 0.  */
5148 for (g = h->got.glist; g != NULL; g = g->next)
5149 if (htab->params->ovly_flavour == ovly_soft_icache
5150 ? g->br_addr == g->stub_addr
5151 : g->addend == 0 && g->ovl == 0)
5153 sym->st_shndx = (_bfd_elf_section_from_bfd_section
5154 (htab->stub_sec[0]->output_section->owner,
5155 htab->stub_sec[0]->output_section));
5156 sym->st_value = g->stub_addr;
/* Non-zero when linking a PPU-embedded SPU "plugin"; consulted by
   spu_elf_post_process_headers below.  */
5164 static int spu_plugin = 0;
/* Setter for the flag above, called from the linker emulation.
   NOTE(review): function body elided in this view.  */
5167 spu_elf_plugin (int val)
5172 /* Set ELF header e_type for plugins.  */
/* When building a plugin, mark the output ET_DYN (it is loaded
   dynamically by the PPU side), then run the generic post-processing.
   NOTE(review): the spu_plugin test guarding the assignment is elided
   from this view.  */
5175 spu_elf_post_process_headers (bfd *abfd, struct bfd_link_info *info)
5179 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5181 i_ehdrp->e_type = ET_DYN;
5184 _bfd_elf_post_process_headers (abfd, info);
5187 /* We may add an extra PT_LOAD segment for .toe.  We also need extra
5188    segments for overlays.  */
/* Returns how many extra program headers to reserve: one per overlay,
   plus one if a loadable .toe section exists.  NOTE(review): the
   accumulation and return statements are elided from this view.  */
5191 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5198 struct spu_link_hash_table *htab = spu_hash_table (info);
5199 extra = htab->num_overlays;
5205 sec = bfd_get_section_by_name (abfd, ".toe");
5206 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5212 /* Remove .toe section from other PT_LOAD segments and put it in
5213    a segment of its own.  Put overlays in separate segments too.  */
5216 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
5219 struct elf_segment_map *m, *m_overlay;
5220 struct elf_segment_map **p, **p_overlay;
5226 toe = bfd_get_section_by_name (abfd, ".toe");
  /* First pass: split any multi-section PT_LOAD so that .toe and each
     overlay section ends up alone in its own segment.  */
5227 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
5228 if (m->p_type == PT_LOAD && m->count > 1)
5229 for (i = 0; i < m->count; i++)
5230 if ((s = m->sections[i]) == toe
5231 || spu_elf_section_data (s)->u.o.ovl_index != 0)
5233 struct elf_segment_map *m2;
5236 if (i + 1 < m->count)
  /* Carve the sections after S into a new PT_LOAD segment M2.  */
5238 amt = sizeof (struct elf_segment_map);
5239 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
5240 m2 = bfd_zalloc (abfd, amt);
5243 m2->count = m->count - (i + 1);
5244 memcpy (m2->sections, m->sections + i + 1,
5245 m2->count * sizeof (m->sections[0]));
5246 m2->p_type = PT_LOAD;
  /* Then give S its own single-section PT_LOAD segment.  */
5254 amt = sizeof (struct elf_segment_map);
5255 m2 = bfd_zalloc (abfd, amt);
5258 m2->p_type = PT_LOAD;
5260 m2->sections[0] = s;
5268 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5269    PT_LOAD segments.  This can cause the .ovl.init section to be
5270    overwritten with the contents of some overlay segment.  To work
5271    around this issue, we ensure that all PF_OVERLAY segments are
5272    sorted first amongst the program headers; this ensures that even
5273    with a broken loader, the .ovl.init section (which is not marked
5274    as PF_OVERLAY) will be placed into SPU local store on startup.  */
5276 /* Move all overlay segments onto a separate list.  */
5277 p = &elf_seg_map (abfd);
5278 p_overlay = &m_overlay;
  /* Walk the segment map, unlinking single-section overlay PT_LOADs
     onto the m_overlay list (loop control elided from this view).  */
5281 if ((*p)->p_type == PT_LOAD && (*p)->count == 1
5282 && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
5287 p_overlay = &m->next;
5294 /* Re-insert overlay segments at the head of the segment map.  */
5295 *p_overlay = elf_seg_map (abfd);
5296 elf_seg_map (abfd) = m_overlay;
5301 /* Tweak the section type of .note.spu_name.  */
/* fake_sections hook: force SHT_NOTE for the SPU name note section so
   its section header type matches its PT_NOTE usage.  */
5304 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5305 Elf_Internal_Shdr *hdr,
5308 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5309 hdr->sh_type = SHT_NOTE;
5313 /* Tweak phdrs before writing them out.  */
/* Final phdr fixups: mark overlay segments PF_OVERLAY, patch their
   file offsets into the runtime _ovly_table (or .ovl.init for
   soft-icache), and pad PT_LOAD p_filesz/p_memsz up to the 16-byte DMA
   granule where that cannot create overlapping segments.
   NOTE(review): several guard conditions and loop bodies are elided
   from this view.  */
5316 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
5318 const struct elf_backend_data *bed;
5319 struct elf_obj_tdata *tdata;
5320 Elf_Internal_Phdr *phdr, *last;
5321 struct spu_link_hash_table *htab;
5328 bed = get_elf_backend_data (abfd);
5329 tdata = elf_tdata (abfd);
5331 count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
5332 htab = spu_hash_table (info);
5333 if (htab->num_overlays != 0)
5335 struct elf_segment_map *m;
  /* Walk segments in phdr order so index I matches phdr[I].  */
5338 for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
5340 && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
5342 /* Mark this as an overlay header.  */
5343 phdr[i].p_flags |= PF_OVERLAY;
5345 if (htab->ovtab != NULL && htab->ovtab->size != 0
5346 && htab->params->ovly_flavour != ovly_soft_icache)
5348 bfd_byte *p = htab->ovtab->contents;
5349 unsigned int off = o * 16 + 8;
5351 /* Write file_off into _ovly_table.  */
5352 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
5355 /* Soft-icache has its file offset put in .ovl.init.  */
5356 if (htab->init != NULL && htab->init->size != 0)
5358 bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;
5360 bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
5364 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5365    of 16.  This should always be possible when using the standard
5366    linker scripts, but don't create overlapping segments if
5367    someone is playing games with linker scripts.  */
  /* First backwards pass: verify padding each PT_LOAD would not run
     into the following one (bail-out path elided).  */
5369 for (i = count; i-- != 0; )
5370 if (phdr[i].p_type == PT_LOAD)
5374 adjust = -phdr[i].p_filesz & 15;
5377 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
5380 adjust = -phdr[i].p_memsz & 15;
5383 && phdr[i].p_filesz != 0
5384 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
5385 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
5388 if (phdr[i].p_filesz != 0)
  /* Second pass: only reached when the check above found no overlap
     (i == -1); actually apply the 16-byte padding.  */
5392 if (i == (unsigned int) -1)
5393 for (i = count; i-- != 0; )
5394 if (phdr[i].p_type == PT_LOAD)
5398 adjust = -phdr[i].p_filesz & 15;
5399 phdr[i].p_filesz += adjust;
5401 adjust = -phdr[i].p_memsz & 15;
5402 phdr[i].p_memsz += adjust;
/* Size the .fixup section before layout: count the quadwords that
   contain at least one R_SPU_ADDR32 reloc across all allocatable input
   sections, then allocate one fixup record per such quadword plus a
   NULL sentinel.  NOTE(review): loop braces, `continue`s and the
   return are elided from this view.  */
5409 spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
5411 struct spu_link_hash_table *htab = spu_hash_table (info);
5412 if (htab->params->emit_fixups)
5414 asection *sfixup = htab->sfixup;
5415 int fixup_count = 0;
5419 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
5423 if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
5426 /* Walk over each section attached to the input bfd.  */
5427 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
5429 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5432 /* If there aren't any relocs, then there's nothing more
5434 if ((isec->flags & SEC_ALLOC) == 0
5435 || (isec->flags & SEC_RELOC) == 0
5436 || isec->reloc_count == 0)
5439 /* Get the relocs.  */
5441 _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
5443 if (internal_relocs == NULL)
5446 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5447    relocations.  They are stored in a single word by
5448    saving the upper 28 bits of the address and setting the
5449    lower 4 bits to a bit mask of the words that have the
5450    relocation.  BASE_END keeps track of the next quadword. */
5451 irela = internal_relocs;
5452 irelaend = irela + isec->reloc_count;
  /* Relocs are offset-sorted, so a simple high-water mark suffices
     to count one record per touched quadword.  */
5454 for (; irela < irelaend; irela++)
5455 if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
5456 && irela->r_offset >= base_end)
5458 base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
5464 /* We always have a NULL fixup as a sentinel */
5465 size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
5466 if (!bfd_set_section_size (output_bfd, sfixup, size))
5468 sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
5469 if (sfixup->contents == NULL)
/* Target vector definition: wire the functions above into the generic
   ELF backend via the macros consumed by elf32-target.h.  */
5475 #define TARGET_BIG_SYM	spu_elf32_vec
5476 #define TARGET_BIG_NAME	"elf32-spu"
5477 #define ELF_ARCH	bfd_arch_spu
5478 #define ELF_TARGET_ID	SPU_ELF_DATA
5479 #define ELF_MACHINE_CODE	EM_SPU
5480 /* This matches the alignment need for DMA.  */
5481 #define ELF_MAXPAGESIZE	0x80
5482 #define elf_backend_rela_normal		1
5483 #define elf_backend_can_gc_sections	1
5485 #define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
5486 #define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
5487 #define elf_info_to_howto			spu_elf_info_to_howto
5488 #define elf_backend_count_relocs		spu_elf_count_relocs
5489 #define elf_backend_relocate_section		spu_elf_relocate_section
5490 #define elf_backend_finish_dynamic_sections	spu_elf_finish_dynamic_sections
5491 #define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
5492 #define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
5493 #define elf_backend_object_p			spu_elf_object_p
5494 #define bfd_elf32_new_section_hook		spu_elf_new_section_hook
5495 #define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create
5497 #define elf_backend_additional_program_headers	spu_elf_additional_program_headers
5498 #define elf_backend_modify_segment_map		spu_elf_modify_segment_map
5499 #define elf_backend_modify_program_headers	spu_elf_modify_program_headers
5500 #define elf_backend_post_process_headers        spu_elf_post_process_headers
5501 #define elf_backend_fake_sections		spu_elf_fake_sections
5502 #define elf_backend_special_sections		spu_elf_special_sections
5503 #define bfd_elf32_bfd_final_link		spu_elf_final_link
5505 #include "elf32-target.h"