1 /* SPU specific support for 32-bit ELF
3 Copyright 2006-2013 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table[] = {
40 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
41 bfd_elf_generic_reloc, "SPU_NONE",
42 FALSE, 0, 0x00000000, FALSE),
43 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
44 bfd_elf_generic_reloc, "SPU_ADDR10",
45 FALSE, 0, 0x00ffc000, FALSE),
46 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
47 bfd_elf_generic_reloc, "SPU_ADDR16",
48 FALSE, 0, 0x007fff80, FALSE),
49 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
50 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
51 FALSE, 0, 0x007fff80, FALSE),
52 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
53 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
54 FALSE, 0, 0x007fff80, FALSE),
55 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
56 bfd_elf_generic_reloc, "SPU_ADDR18",
57 FALSE, 0, 0x01ffff80, FALSE),
58 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "SPU_ADDR32",
60 FALSE, 0, 0xffffffff, FALSE),
61 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "SPU_REL16",
63 FALSE, 0, 0x007fff80, TRUE),
64 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
65 bfd_elf_generic_reloc, "SPU_ADDR7",
66 FALSE, 0, 0x001fc000, FALSE),
67 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
68 spu_elf_rel9, "SPU_REL9",
69 FALSE, 0, 0x0180007f, TRUE),
70 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
71 spu_elf_rel9, "SPU_REL9I",
72 FALSE, 0, 0x0000c07f, TRUE),
73 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
74 bfd_elf_generic_reloc, "SPU_ADDR10I",
75 FALSE, 0, 0x00ffc000, FALSE),
76 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
77 bfd_elf_generic_reloc, "SPU_ADDR16I",
78 FALSE, 0, 0x007fff80, FALSE),
79 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
80 bfd_elf_generic_reloc, "SPU_REL32",
81 FALSE, 0, 0xffffffff, TRUE),
82 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "SPU_ADDR16X",
84 FALSE, 0, 0x007fff80, FALSE),
85 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
86 bfd_elf_generic_reloc, "SPU_PPU32",
87 FALSE, 0, 0xffffffff, FALSE),
88 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
89 bfd_elf_generic_reloc, "SPU_PPU64",
91 HOWTO (R_SPU_ADD_PIC, 0, 0, 0, FALSE, 0, complain_overflow_dont,
92 bfd_elf_generic_reloc, "SPU_ADD_PIC",
93 FALSE, 0, 0x00000000, FALSE),
96 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
97 { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
98 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
109 case BFD_RELOC_SPU_IMM10W:
111 case BFD_RELOC_SPU_IMM16W:
113 case BFD_RELOC_SPU_LO16:
114 return R_SPU_ADDR16_LO;
115 case BFD_RELOC_SPU_HI16:
116 return R_SPU_ADDR16_HI;
117 case BFD_RELOC_SPU_IMM18:
119 case BFD_RELOC_SPU_PCREL16:
121 case BFD_RELOC_SPU_IMM7:
123 case BFD_RELOC_SPU_IMM8:
125 case BFD_RELOC_SPU_PCREL9a:
127 case BFD_RELOC_SPU_PCREL9b:
129 case BFD_RELOC_SPU_IMM10:
130 return R_SPU_ADDR10I;
131 case BFD_RELOC_SPU_IMM16:
132 return R_SPU_ADDR16I;
135 case BFD_RELOC_32_PCREL:
137 case BFD_RELOC_SPU_PPU32:
139 case BFD_RELOC_SPU_PPU64:
141 case BFD_RELOC_SPU_ADD_PIC:
142 return R_SPU_ADD_PIC;
147 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
149 Elf_Internal_Rela *dst)
151 enum elf_spu_reloc_type r_type;
153 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
154 BFD_ASSERT (r_type < R_SPU_max);
155 cache_ptr->howto = &elf_howto_table[(int) r_type];
158 static reloc_howto_type *
159 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
160 bfd_reloc_code_real_type code)
162 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
164 if (r_type == R_SPU_NONE)
167 return elf_howto_table + r_type;
170 static reloc_howto_type *
171 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
176 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
177 if (elf_howto_table[i].name != NULL
178 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
179 return &elf_howto_table[i];
184 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
186 static bfd_reloc_status_type
187 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
188 void *data, asection *input_section,
189 bfd *output_bfd, char **error_message)
191 bfd_size_type octets;
195 /* If this is a relocatable link (output_bfd test tells us), just
196 call the generic function. Any adjustment will be done at final
198 if (output_bfd != NULL)
199 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
200 input_section, output_bfd, error_message);
202 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
203 return bfd_reloc_outofrange;
204 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
206 /* Get symbol value. */
208 if (!bfd_is_com_section (symbol->section))
210 if (symbol->section->output_section)
211 val += symbol->section->output_section->vma;
213 val += reloc_entry->addend;
215 /* Make it pc-relative. */
216 val -= input_section->output_section->vma + input_section->output_offset;
219 if (val + 256 >= 512)
220 return bfd_reloc_overflow;
222 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
224 /* Move two high bits of value to REL9I and REL9 position.
225 The mask will take care of selecting the right field. */
226 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
227 insn &= ~reloc_entry->howto->dst_mask;
228 insn |= val & reloc_entry->howto->dst_mask;
229 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
234 spu_elf_new_section_hook (bfd *abfd, asection *sec)
236 if (!sec->used_by_bfd)
238 struct _spu_elf_section_data *sdata;
240 sdata = bfd_zalloc (abfd, sizeof (*sdata));
243 sec->used_by_bfd = sdata;
246 return _bfd_elf_new_section_hook (abfd, sec);
/* Set up overlay info for executables.  */

/* NOTE(review): this function is truncated — the return-type line,
   several braces, the num_ovl/num_buf increments, the last_phdr
   update and the final return were lost in extraction.  Compare
   against upstream bfd/elf32-spu.c before building.  */
spu_elf_object_p (bfd *abfd)
  /* Only executables/shared objects carry overlay program headers.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      /* Walk PT_LOAD headers flagged PF_OVERLAY, numbering overlays
	 and overlay buffers.  */
      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	    /* A differing vaddr within the 18-bit local-store address
	       space marks the start of a new overlay buffer.  */
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	    /* Tag every section contained in this overlay segment with
	       its overlay and buffer numbers.  */
	    for (j = 1; j < elf_numsections (abfd); j++)
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (ELF_SECTION_SIZE (shdr, phdr) != 0
		    && ELF_SECTION_IN_SEGMENT (shdr, phdr))
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
288 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
289 strip --strip-unneeded will not remove them. */
292 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
294 if (sym->name != NULL
295 && sym->section != bfd_abs_section_ptr
296 && strncmp (sym->name, "_EAR_", 5) == 0)
297 sym->flags |= BSF_KEEP;
/* SPU ELF linker hash table.  */

/* NOTE(review): several members of this struct (overlay section
   pointers, stub sections, fixup section pointer, remaining flag
   bits) and the whole of struct got_entry besides its 'next' link
   were lost in extraction — compare against upstream elf32-spu.c.  */
struct spu_link_hash_table
  /* Base class: the generic ELF linker hash table.  */
  struct elf_link_hash_table elf;

  /* Linker parameters supplied by the SPU emulation (ld/emultempl).  */
  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */

  /* Overlay manager entry points: {load, return} or, for soft-icache,
     {br handler, call handler} — see entry_names in
     spu_elf_find_overlays.  */
  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section */

  /* Set on error while sizing/building stubs.  */
  unsigned int stub_err : 1;

/* Hijack the generic got fields for overlay stub accounting.  */

    struct got_entry *next;

/* Retrieve the SPU hash table from the link info; NULL if the hash
   table belongs to some other ELF backend.  */
#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
   == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
/* NOTE(review): the struct keyword/brace lines for call_info and
   function_info, plus some members (the function section pointer,
   address range, lr_store/sp_adjust offsets, depth) were lost in
   extraction; only the member fragments below survive.  */

  /* Call-graph edge: callee and chain of edges from the same caller.  */
  struct function_info *fun;
  struct call_info *next;
  /* Deepest call-tree depth seen through this edge.  */
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;

  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  Elf_Internal_Sym *sym;
  struct elf_link_hash_entry *h;
  /* Function section.  */
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  /* Offset where we found a store of lr, or -1 if none found.  */
  /* Offset where we found the stack adjustment insn.  */
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section.  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;

/* Per-section table of the functions it contains.  */
struct spu_elf_stack_info
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];

/* Find the function containing the given offset; defined later in
   this file.  */
static struct function_info *find_function (asection *, bfd_vma,
					    struct bfd_link_info *);
435 /* Create a spu ELF linker hash table. */
437 static struct bfd_link_hash_table *
438 spu_elf_link_hash_table_create (bfd *abfd)
440 struct spu_link_hash_table *htab;
442 htab = bfd_zmalloc (sizeof (*htab));
446 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
447 _bfd_elf_link_hash_newfunc,
448 sizeof (struct elf_link_hash_entry),
455 htab->elf.init_got_refcount.refcount = 0;
456 htab->elf.init_got_refcount.glist = NULL;
457 htab->elf.init_got_offset.offset = 0;
458 htab->elf.init_got_offset.glist = NULL;
459 return &htab->elf.root;
463 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
465 bfd_vma max_branch_log2;
467 struct spu_link_hash_table *htab = spu_hash_table (info);
468 htab->params = params;
469 htab->line_size_log2 = bfd_log2 (htab->params->line_size);
470 htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
472 /* For the software i-cache, we provide a "from" list whose size
473 is a power-of-two number of quadwords, big enough to hold one
474 byte per outgoing branch. Compute this number here. */
475 max_branch_log2 = bfd_log2 (htab->params->max_branch);
476 htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

/* NOTE(review): truncated — the return type, the ibfd/symsecp
   parameters, the assignments through *hp/*symp/*symsecp/*locsymsp,
   error handling around bfd_elf_get_elf_syms, braces and returns were
   lost in extraction.  Compare against upstream elf32-spu.c.  */
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  /* Indices at or beyond sh_info are global symbols.  */
  if (r_symndx >= symtab_hdr->sh_info)
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;

      /* Local symbol: read (and cache) the local symbol table.  */
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    0, NULL, NULL, NULL);
      sym = locsyms + r_symndx;

	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

/* NOTE(review): truncated — the return type, local declarations
   (ibfd, flags, s, name_len, size, data), several error returns and
   braces were lost in extraction.  Compare against upstream.  */
spu_elf_create_sections (struct bfd_link_info *info)
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* If any input already carries the SPU name note, don't add one.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)

      /* Make SPU_PTNOTE_SPUNAME section.  */
      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
	  || !bfd_set_section_alignment (ibfd, s, 4))

      /* Note layout: 12-byte header, name (word-aligned), then the
	 output file name (word-aligned).  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))

      data = bfd_zalloc (ibfd, size);

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);

  if (htab->params->emit_fixups)
      /* Create the .fixup section in the dynobj bfd.  */
      if (htab->elf.dynobj == NULL)
	htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
	       | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
617 /* qsort predicate to sort sections by vma. */
620 sort_sections (const void *a, const void *b)
622 const asection *const *s1 = a;
623 const asection *const *s2 = b;
624 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
627 return delta < 0 ? -1 : 1;
629 return (*s1)->index - (*s2)->index;
/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */

/* NOTE(review): heavily truncated — the return type, many locals
   (s, ovl_end, j), braces, increments, error returns and the final
   return were lost in extraction.  Compare against upstream.  */
spu_elf_find_overlays (struct bfd_link_info *info)
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;

  /* Overlay manager entry point names, indexed by
     [load/return][normal/soft-icache flavour].  */
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }

  if (info->output_bfd->section_count < 2)

    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
      /* Soft-icache: overlays are cache lines within a fixed cache
	 area; vma overlap marks the start of that area.  */
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay
	 section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
	  if (s->vma < ovl_end)
	      asection *s0 = alloc_sec[i - 1];
		       << (htab->num_lines_log2 + htab->line_size_log2)));
	    ovl_end = s->vma + s->size;

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	  if (s->vma >= ovl_end)

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      set_id = (num_buf == prev_buf)? set_id + 1 : 0;

	      if ((s->vma - vma_start) & (htab->params->line_size - 1))
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "does not start on a cache line.\n"),
		  bfd_set_error (bfd_error_bad_value);
	      else if (s->size > htab->params->line_size)
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "is larger than a cache line.\n"),
		  bfd_set_error (bfd_error_bad_value);

	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= (set_id << htab->num_lines_log2) + num_buf;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;

      /* Ensure there are no more overlay sections.  */
	  if (s->vma < ovl_end)
	      info->callbacks->einfo (_("%X%P: overlay section %A "
					"is not in cache area.\n"),
	      bfd_set_error (bfd_error_bad_value);
	    ovl_end = s->vma + s->size;

      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	  if (s->vma < ovl_end)
	      asection *s0 = alloc_sec[i - 1];

	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		  if (strncmp (s0->name, ".ovl.init", 9) != 0)
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    ovl_end = s->vma + s->size;
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  if (s0->vma != s->vma)
		      info->callbacks->einfo (_("%X%P: overlay sections %A "
						"and %A do not start at the "
		      bfd_set_error (bfd_error_bad_value);
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
	    ovl_end = s->vma + s->size;

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  /* Look up (or create undefined) the overlay manager entry points.  */
  for (i = 0; i < 2; i++)
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);

      if (h->root.type == bfd_link_hash_new)
	  h->root.type = bfd_link_hash_undefined;
	  h->ref_regular_nonweak = 1;
      htab->ovly_entry[i] = h;
/* Non-zero to use bra in overlay stubs rather than br.  Restored: the
   define itself was missing although build_stub below emits both the
   BR (relative) and BRA (absolute) forms of each stub branch.  */
#define BRA_STUBS 0

/* SPU instruction opcodes used when emitting overlay stubs.  */
#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
841 /* Return true for all relative and absolute branch instructions.
849 brhnz 00100011 0.. */
852 is_branch (const unsigned char *insn)
854 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
857 /* Return true for all indirect branch instructions.
865 bihnz 00100101 011 */
868 is_indirect_branch (const unsigned char *insn)
870 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
873 /* Return true for branch hint instructions.
878 is_hint (const unsigned char *insn)
880 return (insn[0] & 0xfc) == 0x10;
883 /* True if INPUT_SECTION might need overlay stubs. */
886 maybe_needs_stubs (asection *input_section)
888 /* No stubs for debug sections and suchlike. */
889 if ((input_section->flags & SEC_ALLOC) == 0)
892 /* No stubs for link-once sections that will be discarded. */
893 if (input_section->output_section == bfd_abs_section_ptr)
896 /* Don't create stubs for .eh_frame references. */
897 if (strcmp (input_section->name, ".eh_frame") == 0)
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

/* NOTE(review): truncated — the sym_sec/contents parameters, several
   early returns, the insn-read error path, braces and the final
   return were lost in extraction.  Compare against upstream.  */
static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *input_section,
		Elf_Internal_Rela *irela,
		struct bfd_link_info *info)
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;

      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)

      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))

    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  /* Only 16-bit branch-capable relocs can be branches or hints.  */
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
      if (contents == NULL)
	  if (!bfd_get_section_contents (input_section->owner,
	  contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
	  /* brsl/brasl set the link register: treat as a call.  */
	  call = (contents[0] & 0xfd) == 0x31;
	      && sym_type != STT_FUNC
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

		sym_name = h->root.root.string;
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,

	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);

  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
      unsigned int lrlive = 0;
	/* lr-liveness info encoded in the branch insn by .brinfo.  */
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
	ret = br000_ovl_stub + lrlive;

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
/* NOTE(review): truncated — the return type, the ibfd/isec
   parameters, the addend default, got_entry allocation fields,
   braces and returns were lost in extraction.  Compare against
   upstream.  */
count_stub (struct spu_link_hash_table *htab,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
  unsigned int ovl = 0;
  struct got_entry *g, **head;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

    /* Global symbols hang their stub list off the hash entry...  */
    head = &h->got.glist;
      /* ...locals use a lazily-created per-bfd array.  */
      if (elf_local_got_ents (ibfd) == NULL)
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  if (htab->params->ovly_flavour == ovly_soft_icache)
      /* Soft-icache stubs are always counted; one per branch.  */
      htab->stub_count[ovl] += 1;

    addend = irela->r_addend;

      /* Non-overlay stub wanted (ovl == 0).  */
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)

	  /* Need a new non-overlay area stub.  Zap other stubs.  */
	  for (g = *head; g != NULL; g = gnext)
	      if (g->addend == addend)
		  htab->stub_count[g->ovl] -= 1;

      /* Reuse an existing stub for this overlay (or a non-overlay
	 stub, which serves all overlays).  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))

      g = bfd_malloc (sizeof *g);
      g->stub_addr = (bfd_vma) -1;

      htab->stub_count[ovl] += 1;
1144 /* Support two sizes of overlay stubs, a slower more compact stub of two
1145 instructions, and a faster stub of four instructions.
1146 Soft-icache stubs are four or eight words. */
1149 ovl_stub_size (struct spu_elf_params *params)
1151 return 16 << params->ovly_flavour >> params->compact_stub;
1155 ovl_stub_size_log2 (struct spu_elf_params *params)
1157 return 4 + params->ovly_flavour - params->compact_stub;
/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $79,target_address

   Software icache stubs are:

   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler  */

/* NOTE(review): truncated — the return type, the ibfd/isec/dest_sec/
   dest parameters, the stub section overflow checks, several braces,
   assignments and returns were lost in extraction.  Compare against
   upstream elf32-spu.c.  */
build_stub (struct bfd_link_info *info,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

    head = &h->got.glist;
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
      /* Soft-icache: one got_entry per branch site, recording the
	 branch address for later patching by the icache manager.  */
      g = bfd_malloc (sizeof *g);
      g->br_addr = (irela->r_offset
		    + isec->output_offset
		    + isec->output_section->vma);

      /* Find the got_entry counted earlier in count_stub.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
      if (g->ovl == 0 && ovl != 0)

      /* Stub already built?  */
      if (g->stub_addr != (bfd_vma) -1)

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  /* __ovly_load (or soft-icache br handler) address.  */
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  /* All addresses involved must be word aligned.  */
  if (((dest | to | from) & 3) != 0)

  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
      /* Four-instruction stub: ila/lnop/ila/br{a}.  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
      /* Two-instruction stub: brsl/brasl plus target word.  */
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
      /* Determine lr liveness at the branch site, either from the
	 stub type (.brinfo) or by analysing the caller's prologue.  */
      if (stub_type == nonovl_stub)
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
      else if (irela != NULL)
	  /* Analyse branch instructions.  */
	  struct function_info *caller;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
	      while (caller->start != NULL)
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)

	  if (off > caller->sp_adjust)
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
	  else if (off > caller->lr_store)
	      /* Between lr save and stack adjust.  */
	      /* This should never happen since prologues won't  */
	    /* On entry to function.  */

	  /* Cross-check .brinfo against the analysis.  */
	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      /* __icache_call_handler address.  */
      to = (htab->ovly_entry[1]->root.u.def.value
	    + htab->ovly_entry[1]->root.u.def.section->output_offset
	    + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      br_dest = g->stub_addr;
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

	/* Extra space for linked list entries.  */

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
      /* Emit a "NNNNNNNN.ovl_call.name[+addend]" symbol on the stub
	 for debugging.  */
      len = 8 + sizeof (".ovl_call.") - 1;
	len += strlen (h->root.root.string);
	add = (int) irela->r_addend & 0xffffffff;
      name = bfd_malloc (len + 1);

      sprintf (name, "%08x.ovl_call.", g->ovl);
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      if (h->root.type == bfd_link_hash_new)
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
1461 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  INF is the struct bfd_link_info for this link.  */
1465 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1467 /* Symbols starting with _SPUEAR_ need a stub because they may be
1468 invoked by the PPU. */
1469 struct bfd_link_info *info = inf;
1470 struct spu_link_hash_table *htab = spu_hash_table (info);
   /* Only a defined (or weakly defined) _SPUEAR_ symbol whose output
      section is a real SPU section gets a stub counted, and then only
      when it lives in an overlay or --non-overlay-stubs was given.  */
1473 if ((h->root.type == bfd_link_hash_defined
1474 || h->root.type == bfd_link_hash_defweak)
1476 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1477 && (sym_sec = h->root.u.def.section) != NULL
1478 && sym_sec->output_section != bfd_abs_section_ptr
1479 && spu_elf_section_data (sym_sec->output_section) != NULL
1480 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1481 || htab->params->non_overlay_stubs))
   /* Sizing pass only: count_stub reserves space, no code is emitted.  */
1483 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
/* Called via elf_link_hash_traverse to build stubs for any _SPUEAR_
   symbols.  The guarding condition mirrors allocate_spuear_stubs so the
   build pass emits exactly the stubs the sizing pass counted.  */
1490 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1492 /* Symbols starting with _SPUEAR_ need a stub because they may be
1493 invoked by the PPU. */
1494 struct bfd_link_info *info = inf;
1495 struct spu_link_hash_table *htab = spu_hash_table (info);
1498 if ((h->root.type == bfd_link_hash_defined
1499 || h->root.type == bfd_link_hash_defweak)
1501 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1502 && (sym_sec = h->root.u.def.section) != NULL
1503 && sym_sec->output_section != bfd_abs_section_ptr
1504 && spu_elf_section_data (sym_sec->output_section) != NULL
1505 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1506 || htab->params->non_overlay_stubs))
   /* Emit a non-overlay stub targeting the symbol's defined value.  */
1508 return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1509 h->root.u.def.value, sym_sec);
1515 /* Size or build stubs.  Walks every reloc of every section of every
   SPU input bfd; when BUILD is FALSE the needed overlay stubs are
   counted (count_stub), when TRUE they are emitted (build_stub).  */
1518 process_stubs (struct bfd_link_info *info, bfd_boolean build)
1520 struct spu_link_hash_table *htab = spu_hash_table (info);
1523 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
1525 extern const bfd_target bfd_elf32_spu_vec;
1526 Elf_Internal_Shdr *symtab_hdr;
1528 Elf_Internal_Sym *local_syms = NULL;
   /* Skip any input bfd that is not SPU ELF.  */
1530 if (ibfd->xvec != &bfd_elf32_spu_vec)
1533 /* We'll need the symbol table in a second. */
1534 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1535 if (symtab_hdr->sh_info == 0)
1538 /* Walk over each section attached to the input bfd. */
1539 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
1541 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1543 /* If there aren't any relocs, then there's nothing more to do. */
1544 if ((isec->flags & SEC_RELOC) == 0
1545 || isec->reloc_count == 0)
1548 if (!maybe_needs_stubs (isec))
1551 /* Get the relocs. */
1552 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1554 if (internal_relocs == NULL)
1555 goto error_ret_free_local;
1557 /* Now examine each relocation. */
1558 irela = internal_relocs;
1559 irelaend = irela + isec->reloc_count;
1560 for (; irela < irelaend; irela++)
1562 enum elf_spu_reloc_type r_type;
1563 unsigned int r_indx;
1565 Elf_Internal_Sym *sym;
1566 struct elf_link_hash_entry *h;
1567 enum _stub_type stub_type;
1569 r_type = ELF32_R_TYPE (irela->r_info);
1570 r_indx = ELF32_R_SYM (irela->r_info);
   /* Reject out-of-range reloc types; the error paths below free
      the reloc buffer and any symbol buffer we own before failing.  */
1572 if (r_type >= R_SPU_max)
1574 bfd_set_error (bfd_error_bad_value);
1575 error_ret_free_internal:
1576 if (elf_section_data (isec)->relocs != internal_relocs)
1577 free (internal_relocs);
1578 error_ret_free_local:
1579 if (local_syms != NULL
1580 && (symtab_hdr->contents
1581 != (unsigned char *) local_syms))
1586 /* Determine the reloc target section. */
1587 if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
1588 goto error_ret_free_internal;
1590 stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1592 if (stub_type == no_stub)
1594 else if (stub_type == stub_error)
1595 goto error_ret_free_internal;
   /* Lazily allocate the per-overlay stub counters, one slot per
      overlay plus one for non-overlay stubs.  */
1597 if (htab->stub_count == NULL)
1600 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1601 htab->stub_count = bfd_zmalloc (amt);
1602 if (htab->stub_count == NULL)
1603 goto error_ret_free_internal;
1608 if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1609 goto error_ret_free_internal;
   /* Build pass: destination is symbol value plus reloc addend.  */
1616 dest = h->root.u.def.value;
1618 dest = sym->st_value;
1619 dest += irela->r_addend;
1620 if (!build_stub (info, ibfd, isec, stub_type, h, irela,
1622 goto error_ret_free_internal;
1626 /* We're done with the internal relocs, free them. */
1627 if (elf_section_data (isec)->relocs != internal_relocs)
1628 free (internal_relocs);
   /* Keep or free the local symbol buffer depending on keep_memory;
      when kept, cache it on the symtab header for later passes.  */
1631 if (local_syms != NULL
1632 && symtab_hdr->contents != (unsigned char *) local_syms)
1634 if (!info->keep_memory)
1637 symtab_hdr->contents = (unsigned char *) local_syms;
1644 /* Allocate space for overlay call and return stubs.
1645 Return 0 on error, 1 if no overlays, 2 otherwise. */
1648 spu_elf_size_stubs (struct bfd_link_info *info)
1650 struct spu_link_hash_table *htab;
   /* First pass: count stubs (process_stubs with build == FALSE), then
      count stubs for exported _SPUEAR_ entry points.  */
1657 if (!process_stubs (info, FALSE))
1660 htab = spu_hash_table (info);
1661 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
1665 ibfd = info->input_bfds;
1666 if (htab->stub_count != NULL)
   /* One .stub section per overlay plus one for non-overlay stubs.  */
1668 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1669 htab->stub_sec = bfd_zmalloc (amt);
1670 if (htab->stub_sec == NULL)
1673 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1674 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1675 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1676 htab->stub_sec[0] = stub;
1678 || !bfd_set_section_alignment (ibfd, stub,
1679 ovl_stub_size_log2 (htab->params)))
1681 stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
1682 if (htab->params->ovly_flavour == ovly_soft_icache)
1683 /* Extra space for linked list entries. */
1684 stub->size += htab->stub_count[0] * 16;
1686 for (i = 0; i < htab->num_overlays; ++i)
1688 asection *osec = htab->ovl_sec[i];
1689 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1690 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1691 htab->stub_sec[ovl] = stub;
1693 || !bfd_set_section_alignment (ibfd, stub,
1694 ovl_stub_size_log2 (htab->params)))
1696 stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
1700 if (htab->params->ovly_flavour == ovly_soft_icache)
1702 /* Space for icache manager tables.
1703 a) Tag array, one quadword per cache line.
1704 b) Rewrite "to" list, one quadword per cache line.
1705 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1706 a power-of-two number of full quadwords) per cache line. */
1709 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1710 if (htab->ovtab == NULL
1711 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
   /* 16 (tag) + 16 ("to") + 16<<fromelem_size_log2 ("from") bytes per
      cache line, times the number of cache lines.  */
1714 htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
1715 << htab->num_lines_log2;
1717 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1718 htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
1719 if (htab->init == NULL
1720 || !bfd_set_section_alignment (ibfd, htab->init, 4))
1723 htab->init->size = 16;
1725 else if (htab->stub_count == NULL)
1729 /* htab->ovtab consists of two arrays.
1739 . } _ovly_buf_table[];
1742 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1743 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1744 if (htab->ovtab == NULL
1745 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
   /* _ovly_table (16 bytes per overlay, plus a leading slot) followed
      by _ovly_buf_table (4 bytes per buffer).  */
1748 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1751 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1752 if (htab->toe == NULL
1753 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1755 htab->toe->size = 16;
1760 /* Called from ld to place overlay manager data sections. This is done
1761 after the overlay manager itself is loaded, mainly so that the
1762 linker's htab->init section is placed after any other .ovl.init
   sections.  Placement is delegated to the params->place_spu_section
   callback supplied by the linker.  */
1766 spu_elf_place_overlay_data (struct bfd_link_info *info)
1768 struct spu_link_hash_table *htab = spu_hash_table (info);
1771 if (htab->stub_sec != NULL)
   /* Non-overlay stubs go in .text; each overlay's stubs go with
      that overlay's section.  */
1773 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1775 for (i = 0; i < htab->num_overlays; ++i)
1777 asection *osec = htab->ovl_sec[i];
1778 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1779 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1783 if (htab->params->ovly_flavour == ovly_soft_icache)
1784 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1786 if (htab->ovtab != NULL)
1788 const char *ovout = ".data";
1789 if (htab->params->ovly_flavour == ovly_soft_icache)
1791 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1794 if (htab->toe != NULL)
1795 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1798 /* Functions to handle embedded spu_ovl.o object. */
/* iovec open callback for the built-in overlay manager: the STREAM
   cookie is simply passed through (body elided in this excerpt).  */
1801 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
/* iovec pread callback: copy up to COUNT bytes at OFFSET from the
   in-memory overlay manager image described by STREAM into BUF,
   clamping the read to the image bounds.  */
1807 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1813 struct _ovl_stream *os;
1817 os = (struct _ovl_stream *) stream;
1818 max = (const char *) os->end - (const char *) os->start;
   /* Reads at or past the end return nothing.  */
1820 if ((ufile_ptr) offset >= max)
1824 if (count > max - offset)
1825 count = max - offset;
1827 memcpy (buf, (const char *) os->start + offset, count);
/* Open the built-in overlay manager object via bfd_openr_iovec using
   the callbacks above.  Returns TRUE iff the bfd was created.  */
1832 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1834 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1841 return *ovl_bfd != NULL;
/* Return the overlay index of SEC's output section, or (per the guard
   on the absolute section) a default for non-overlay sections.  */
1845 overlay_index (asection *sec)
1848 || sec->output_section == bfd_abs_section_ptr)
1850 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1853 /* Define an STT_OBJECT symbol. */
1855 static struct elf_link_hash_entry *
1856 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1858 struct elf_link_hash_entry *h;
1860 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
   /* Undefined so far: define it in .ovtab ourselves.  */
1864 if (h->root.type != bfd_link_hash_defined
1867 h->root.type = bfd_link_hash_defined;
1868 h->root.u.def.section = htab->ovtab;
1869 h->type = STT_OBJECT;
1872 h->ref_regular_nonweak = 1;
   /* Already defined elsewhere: overlay-table symbols may only be
      defined by the linker, so report where the clash came from.  */
1875 else if (h->root.u.def.section->owner != NULL)
1877 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1878 h->root.u.def.section->owner,
1879 h->root.root.string);
1880 bfd_set_error (bfd_error_bad_value);
1885 (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
1886 h->root.root.string);
1887 bfd_set_error (bfd_error_bad_value);
1894 /* Fill in all stubs and the overlay tables.  Verifies the overlay
   entry points, emits every stub (process_stubs with build == TRUE),
   checks emitted sizes against the sizing pass, then writes either the
   soft-icache tables or the classic _ovly_table/_ovly_buf_table.  */
1897 spu_elf_build_stubs (struct bfd_link_info *info)
1899 struct spu_link_hash_table *htab = spu_hash_table (info);
1900 struct elf_link_hash_entry *h;
1906 if (htab->num_overlays != 0)
   /* The two overlay manager entry points must not themselves live
      inside an overlay.  */
1908 for (i = 0; i < 2; i++)
1910 h = htab->ovly_entry[i];
1912 && (h->root.type == bfd_link_hash_defined
1913 || h->root.type == bfd_link_hash_defweak)
1916 s = h->root.u.def.section->output_section;
1917 if (spu_elf_section_data (s)->u.o.ovl_index)
1919 (*_bfd_error_handler) (_("%s in overlay section"),
1920 h->root.root.string);
1921 bfd_set_error (bfd_error_bad_value);
1928 if (htab->stub_sec != NULL)
   /* Allocate stub contents; stash the sized length in rawsize and
      reset size so build_stub can use it as the fill pointer.  */
1930 for (i = 0; i <= htab->num_overlays; i++)
1931 if (htab->stub_sec[i]->size != 0)
1933 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1934 htab->stub_sec[i]->size);
1935 if (htab->stub_sec[i]->contents == NULL)
1937 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1938 htab->stub_sec[i]->size = 0;
1941 /* Fill in all the stubs. */
1942 process_stubs (info, TRUE);
1943 if (!htab->stub_err)
1944 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1948 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1949 bfd_set_error (bfd_error_bad_value);
   /* The build pass must produce exactly the bytes the sizing pass
      reserved; anything else is an internal inconsistency.  */
1953 for (i = 0; i <= htab->num_overlays; i++)
1955 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1957 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1958 bfd_set_error (bfd_error_bad_value);
1961 htab->stub_sec[i]->rawsize = 0;
1965 if (htab->ovtab == NULL || htab->ovtab->size == 0)
1968 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1969 if (htab->ovtab->contents == NULL)
1972 p = htab->ovtab->contents;
1973 if (htab->params->ovly_flavour == ovly_soft_icache)
   /* Soft-icache flavour: lay out the icache manager tables and
      define the absolute symbols describing their geometry.  */
1977 h = define_ovtab_symbol (htab, "__icache_tag_array");
1980 h->root.u.def.value = 0;
1981 h->size = 16 << htab->num_lines_log2;
1984 h = define_ovtab_symbol (htab, "__icache_tag_array_size");
1987 h->root.u.def.value = 16 << htab->num_lines_log2;
1988 h->root.u.def.section = bfd_abs_section_ptr;
1990 h = define_ovtab_symbol (htab, "__icache_rewrite_to");
1993 h->root.u.def.value = off;
1994 h->size = 16 << htab->num_lines_log2;
1997 h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
2000 h->root.u.def.value = 16 << htab->num_lines_log2;
2001 h->root.u.def.section = bfd_abs_section_ptr;
2003 h = define_ovtab_symbol (htab, "__icache_rewrite_from");
2006 h->root.u.def.value = off;
2007 h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
2010 h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
2013 h->root.u.def.value = 16 << (htab->fromelem_size_log2
2014 + htab->num_lines_log2);
2015 h->root.u.def.section = bfd_abs_section_ptr;
2017 h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2020 h->root.u.def.value = htab->fromelem_size_log2;
2021 h->root.u.def.section = bfd_abs_section_ptr;
2023 h = define_ovtab_symbol (htab, "__icache_base");
2026 h->root.u.def.value = htab->ovl_sec[0]->vma;
2027 h->root.u.def.section = bfd_abs_section_ptr;
2028 h->size = htab->num_buf << htab->line_size_log2;
2030 h = define_ovtab_symbol (htab, "__icache_linesize");
2033 h->root.u.def.value = 1 << htab->line_size_log2;
2034 h->root.u.def.section = bfd_abs_section_ptr;
2036 h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2039 h->root.u.def.value = htab->line_size_log2;
2040 h->root.u.def.section = bfd_abs_section_ptr;
2042 h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2045 h->root.u.def.value = -htab->line_size_log2;
2046 h->root.u.def.section = bfd_abs_section_ptr;
2048 h = define_ovtab_symbol (htab, "__icache_cachesize");
2051 h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2052 h->root.u.def.section = bfd_abs_section_ptr;
2054 h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2057 h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2058 h->root.u.def.section = bfd_abs_section_ptr;
2060 h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2063 h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2064 h->root.u.def.section = bfd_abs_section_ptr;
2066 if (htab->init != NULL && htab->init->size != 0)
2068 htab->init->contents = bfd_zalloc (htab->init->owner,
2070 if (htab->init->contents == NULL)
2073 h = define_ovtab_symbol (htab, "__icache_fileoff")
2076 h->root.u.def.value = 0;
2077 h->root.u.def.section = htab->init;
2083 /* Write out _ovly_table. */
2084 /* set low bit of .size to mark non-overlay area as present. */
2086 obfd = htab->ovtab->output_section->owner;
   /* One 16-byte _ovly_table entry per overlay output section:
      vma, size (rounded to a quadword), file_off, buffer number.  */
2087 for (s = obfd->sections; s != NULL; s = s->next)
2089 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2093 unsigned long off = ovl_index * 16;
2094 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2096 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2097 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2099 /* file_off written later in spu_elf_modify_program_headers. */
2100 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2104 h = define_ovtab_symbol (htab, "_ovly_table");
2107 h->root.u.def.value = 16;
2108 h->size = htab->num_overlays * 16;
2110 h = define_ovtab_symbol (htab, "_ovly_table_end");
2113 h->root.u.def.value = htab->num_overlays * 16 + 16;
2116 h = define_ovtab_symbol (htab, "_ovly_buf_table");
2119 h->root.u.def.value = htab->num_overlays * 16 + 16;
2120 h->size = htab->num_buf * 4;
2122 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2125 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2129 h = define_ovtab_symbol (htab, "_EAR_");
2132 h->root.u.def.section = htab->toe;
2133 h->root.u.def.value = 0;
2139 /* Check that all loadable section VMAs lie in the range
2140 LO .. HI inclusive, and stash some parameters for --auto-overlay.
   Returns the first offending section, NULL-like result otherwise
   (return on success elided in this excerpt).  */
2143 spu_elf_check_vma (struct bfd_link_info *info)
2145 struct elf_segment_map *m;
2147 struct spu_link_hash_table *htab = spu_hash_table (info);
2148 bfd *abfd = info->output_bfd;
2149 bfd_vma hi = htab->params->local_store_hi;
2150 bfd_vma lo = htab->params->local_store_lo;
   /* Remember the usable local-store size for --auto-overlay.  */
2152 htab->local_store = hi + 1 - lo;
   /* A section is out of range if it starts below LO, starts above HI,
      or its last byte extends past HI.  */
2154 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2155 if (m->p_type == PT_LOAD)
2156 for (i = 0; i < m->count; i++)
2157 if (m->sections[i]->size != 0
2158 && (m->sections[i]->vma < lo
2159 || m->sections[i]->vma > hi
2160 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2161 return m->sections[i];
2166 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2167 Search for stack adjusting insns, and return the sp delta.
2168 If a store of lr is found save the instruction offset to *LR_STORE.
2169 If a stack adjusting instruction is found, save that offset to
   *SP_ADJUST.  Decoding is done by pattern-matching raw SPU opcodes
   while tracking constant register values in REG[].  */
2173 find_function_stack_adjust (asection *sec,
2180 memset (reg, 0, sizeof (reg));
2181 for ( ; offset + 4 <= sec->size; offset += 4)
2183 unsigned char buf[4];
2187 /* Assume no relocs on stack adjusing insns. */
2188 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
   /* RA field straddles bytes 2 and 3 of the big-endian insn word.  */
2192 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2194 if (buf[0] == 0x24 /* stqd */)
2196 if (rt == 0 /* lr */ && ra == 1 /* sp */)
2201 /* Partly decoded immediate field. */
2202 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2204 if (buf[0] == 0x1c /* ai */)
   /* Sign-extend the 10-bit immediate.  */
2207 imm = (imm ^ 0x200) - 0x200;
2208 reg[rt] = reg[ra] + imm;
2210 if (rt == 1 /* sp */)
2214 *sp_adjust = offset;
2218 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2220 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2222 reg[rt] = reg[ra] + reg[rb];
2227 *sp_adjust = offset;
2231 else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2233 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
   /* sf computes rb - ra.  */
2235 reg[rt] = reg[rb] - reg[ra];
2240 *sp_adjust = offset;
2244 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2246 if (buf[0] >= 0x42 /* ila */)
2247 imm |= (buf[0] & 1) << 17;
2252 if (buf[0] == 0x40 /* il */)
2254 if ((buf[1] & 0x80) == 0)
2256 imm = (imm ^ 0x8000) - 0x8000;
2258 else if ((buf[1] & 0x80) == 0 /* ilhu */)
2264 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2266 reg[rt] |= imm & 0xffff;
2269 else if (buf[0] == 0x04 /* ori */)
2272 imm = (imm ^ 0x200) - 0x200;
2273 reg[rt] = reg[ra] | imm;
2276 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
   /* fsmbi expands each immediate bit to a byte of 0xff.  */
2278 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
2279 | ((imm & 0x4000) ? 0x00ff0000 : 0)
2280 | ((imm & 0x2000) ? 0x0000ff00 : 0)
2281 | ((imm & 0x1000) ? 0x000000ff : 0));
2284 else if (buf[0] == 0x16 /* andbi */)
2290 reg[rt] = reg[ra] & imm;
2293 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2295 /* Used in pic reg load. Say rt is trashed. Won't be used
2296 in stack adjust, but we need to continue past this branch. */
2300 else if (is_branch (buf) || is_indirect_branch (buf))
2301 /* If we hit a branch then we must be out of the prologue. */
2308 /* qsort predicate to sort symbols by section and value. */
/* Shared state for sort_syms: the symbol array base and the parallel
   array of each symbol's section (qsort gives no context pointer).  */
2310 static Elf_Internal_Sym *sort_syms_syms;
2311 static asection **sort_syms_psecs;
2314 sort_syms (const void *a, const void *b)
2316 Elf_Internal_Sym *const *s1 = a;
2317 Elf_Internal_Sym *const *s2 = b;
2318 asection *sec1,*sec2;
2319 bfd_signed_vma delta;
2321 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2322 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
   /* Order by section, then value, then descending size, finally by
      original array position as a deterministic tie-break.  */
2325 return sec1->index - sec2->index;
2327 delta = (*s1)->st_value - (*s2)->st_value;
2329 return delta < 0 ? -1 : 1;
2331 delta = (*s2)->st_size - (*s1)->st_size;
2333 return delta < 0 ? -1 : 1;
2335 return *s1 < *s2 ? -1 : 1;
2338 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2339 entries for section SEC. */
2341 static struct spu_elf_stack_info *
2342 alloc_stack_info (asection *sec, int max_fun)
2344 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
   /* The struct already contains one function_info, hence max_fun - 1
      extra entries.  Zeroed so num_fun etc. start at 0.  */
2347 amt = sizeof (struct spu_elf_stack_info);
2348 amt += (max_fun - 1) * sizeof (struct function_info);
2349 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2350 if (sec_data->u.i.stack_info != NULL)
2351 sec_data->u.i.stack_info->max_fun = max_fun;
   /* NULL on allocation failure.  */
2352 return sec_data->u.i.stack_info;
2355 /* Add a new struct function_info describing a (part of a) function
2356 starting at SYM_H. Keep the array sorted by address.  SYM_H is an
   Elf_Internal_Sym * for local syms or an elf_link_hash_entry * for
   globals, selected by the (elided) GLOBAL flag.  */
2358 static struct function_info *
2359 maybe_insert_function (asection *sec,
2362 bfd_boolean is_func)
2364 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2365 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2371 sinfo = alloc_stack_info (sec, 20);
2378 Elf_Internal_Sym *sym = sym_h;
2379 off = sym->st_value;
2380 size = sym->st_size;
2384 struct elf_link_hash_entry *h = sym_h;
2385 off = h->root.u.def.value;
   /* Scan backwards for the insertion point (array sorted by lo).  */
2389 for (i = sinfo->num_fun; --i >= 0; )
2390 if (sinfo->fun[i].lo <= off)
2395 /* Don't add another entry for an alias, but do update some
2397 if (sinfo->fun[i].lo == off)
2399 /* Prefer globals over local syms. */
2400 if (global && !sinfo->fun[i].global)
2402 sinfo->fun[i].global = TRUE;
2403 sinfo->fun[i].u.h = sym_h;
2406 sinfo->fun[i].is_func = TRUE;
2407 return &sinfo->fun[i];
2409 /* Ignore a zero-size symbol inside an existing function. */
2410 else if (sinfo->fun[i].hi > off && size == 0)
2411 return &sinfo->fun[i];
   /* Grow the array by ~50% plus slack when full.  */
2414 if (sinfo->num_fun >= sinfo->max_fun)
2416 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
2417 bfd_size_type old = amt;
2419 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
2420 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
2421 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
2422 sinfo = bfd_realloc (sinfo, amt);
2425 memset ((char *) sinfo + old, 0, amt - old);
2426 sec_data->u.i.stack_info = sinfo;
   /* Shift later entries up and fill in the new slot.  */
2429 if (++i < sinfo->num_fun)
2430 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
2431 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
2432 sinfo->fun[i].is_func = is_func;
2433 sinfo->fun[i].global = global;
2434 sinfo->fun[i].sec = sec;
2436 sinfo->fun[i].u.h = sym_h;
2438 sinfo->fun[i].u.sym = sym_h;
2439 sinfo->fun[i].lo = off;
2440 sinfo->fun[i].hi = off + size;
2441 sinfo->fun[i].lr_store = -1;
2442 sinfo->fun[i].sp_adjust = -1;
   /* Prologue scan: stack usage is the negated sp delta.  */
2443 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
2444 &sinfo->fun[i].lr_store,
2445 &sinfo->fun[i].sp_adjust);
2446 sinfo->num_fun += 1;
2447 return &sinfo->fun[i];
2450 /* Return the name of FUN.  For an unnamed local symbol a
   "section+offset" string is synthesised.  */
2453 func_name (struct function_info *fun)
2457 Elf_Internal_Shdr *symtab_hdr;
   /* Walk to the root of a split (hot/cold or pasted) function.  */
2459 while (fun->start != NULL)
2463 return fun->u.h->root.root.string;
2466 if (fun->u.sym->st_name == 0)
2468 size_t len = strlen (sec->name);
2469 char *name = bfd_malloc (len + 10);
2472 sprintf (name, "%s+%lx", sec->name,
2473 (unsigned long) fun->u.sym->st_value & 0xffffffff);
2477 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2478 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2481 /* Read the instruction at OFF in SEC. Return true iff the instruction
2482 is a nop, lnop, or stop 0 (all zero insn). */
2485 is_nop (asection *sec, bfd_vma off)
2487 unsigned char insn[4];
   /* Out-of-bounds or unreadable counts as not-a-nop.  */
2489 if (off + 4 > sec->size
2490 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2492 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2494 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2499 /* Extend the range of FUN to cover nop padding up to LIMIT.
2500 Return TRUE iff some instruction other than a NOP was found. */
2503 insns_at_end (struct function_info *fun, bfd_vma limit)
   /* Round hi up to an instruction boundary, then skip nops.  */
2505 bfd_vma off = (fun->hi + 3) & -4;
2507 while (off < limit && is_nop (fun->sec, off))
2518 /* Check and fix overlapping function ranges. Return TRUE iff there
2519 are gaps in the current info we have about functions in SEC. */
2522 check_function_ranges (asection *sec, struct bfd_link_info *info)
2524 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2525 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2527 bfd_boolean gaps = FALSE;
2532 for (i = 1; i < sinfo->num_fun; i++)
2533 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2535 /* Fix overlapping symbols. */
2536 const char *f1 = func_name (&sinfo->fun[i - 1]);
2537 const char *f2 = func_name (&sinfo->fun[i]);
   /* Truncate the earlier function at the start of the later one.  */
2539 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2540 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2542 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2545 if (sinfo->num_fun == 0)
   /* Also check the edges: code before the first known function and
      after the last one counts as a gap.  */
2549 if (sinfo->fun[0].lo != 0)
2551 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2553 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2555 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2556 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2558 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2564 /* Search current function info for a function that contains address
2565 OFFSET in section SEC.  Binary search over the sorted fun[] array;
   reports an error and returns failure if no entry covers OFFSET.  */
2567 static struct function_info *
2568 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2570 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2571 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2575 hi = sinfo->num_fun;
2578 mid = (lo + hi) / 2;
2579 if (offset < sinfo->fun[mid].lo)
2581 else if (offset >= sinfo->fun[mid].hi)
2584 return &sinfo->fun[mid];
2586 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2588 bfd_set_error (bfd_error_bad_value);
2592 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2593 if CALLEE was new. If this function return FALSE, CALLEE should
   be freed by the caller (the existing entry absorbs its count).  */
2597 insert_callee (struct function_info *caller, struct call_info *callee)
2599 struct call_info **pp, *p;
2601 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2602 if (p->fun == callee->fun)
2604 /* Tail calls use less stack than normal calls. Retain entry
2605 for normal call over one for tail call. */
2606 p->is_tail &= callee->is_tail;
2609 p->fun->start = NULL;
2610 p->fun->is_func = TRUE;
2612 p->count += callee->count;
2613 /* Reorder list so most recent call is first. */
2615 p->next = caller->call_list;
2616 caller->call_list = p;
   /* Not found: link the new entry at the head of the list.  */
2619 callee->next = caller->call_list;
2620 caller->call_list = callee;
2624 /* Copy CALL and insert the copy into CALLER.  The copy is freed if an
   equivalent entry already exists (see insert_callee).  */
2627 copy_callee (struct function_info *caller, const struct call_info *call)
2629 struct call_info *callee;
2630 callee = bfd_malloc (sizeof (*callee));
2634 if (!insert_callee (caller, callee))
2639 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2640 overlay stub sections. */
2643 interesting_section (asection *s)
   /* Loaded, allocated code that is NOT SEC_IN_MEMORY and not discarded
      to the absolute section.  */
2645 return (s->output_section != bfd_abs_section_ptr
2646 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2647 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2651 /* Rummage through the relocs for SEC, looking for function calls.
2652 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2653 mark destination symbols on calls as being functions. Also
2654 look at branches, which may be tail calls or go to hot/cold
2655 section part of same function. */
2658 mark_functions_via_relocs (asection *sec,
2659 struct bfd_link_info *info,
2662 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2663 Elf_Internal_Shdr *symtab_hdr;
2665 unsigned int priority = 0;
2666 static bfd_boolean warned;
2668 if (!interesting_section (sec)
2669 || sec->reloc_count == 0)
2672 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2674 if (internal_relocs == NULL)
2677 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2678 psyms = &symtab_hdr->contents;
2679 irela = internal_relocs;
2680 irelaend = irela + sec->reloc_count;
2681 for (; irela < irelaend; irela++)
2683 enum elf_spu_reloc_type r_type;
2684 unsigned int r_indx;
2686 Elf_Internal_Sym *sym;
2687 struct elf_link_hash_entry *h;
2689 bfd_boolean nonbranch, is_call;
2690 struct function_info *caller;
2691 struct call_info *callee;
2693 r_type = ELF32_R_TYPE (irela->r_info);
   /* Only REL16/ADDR16 relocs can sit on branch instructions.  */
2694 nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
2696 r_indx = ELF32_R_SYM (irela->r_info);
2697 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2701 || sym_sec->output_section == bfd_abs_section_ptr)
   /* Branch-candidate reloc: fetch the insn and decode it.  */
2707 unsigned char insn[4];
2709 if (!bfd_get_section_contents (sec->owner, sec, insn,
2710 irela->r_offset, 4))
2712 if (is_branch (insn))
   /* brsl/brasl (opcode & 0xfd == 0x31) is a call; the remaining
      bits carry a software branch priority.  */
2714 is_call = (insn[0] & 0xfd) == 0x31;
2715 priority = insn[1] & 0x0f;
2717 priority |= insn[2];
2719 priority |= insn[3];
2721 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2722 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2725 info->callbacks->einfo
2726 (_("%B(%A+0x%v): call to non-code section"
2727 " %B(%A), analysis incomplete\n"),
2728 sec->owner, sec, irela->r_offset,
2729 sym_sec->owner, sym_sec);
2744 /* For --auto-overlay, count possible stubs we need for
2745 function pointer references. */
2746 unsigned int sym_type;
2750 sym_type = ELF_ST_TYPE (sym->st_info);
2751 if (sym_type == STT_FUNC)
2753 if (call_tree && spu_hash_table (info)->params->auto_overlay)
2754 spu_hash_table (info)->non_ovly_stub += 1;
2755 /* If the symbol type is STT_FUNC then this must be a
2756 function pointer initialisation. */
2759 /* Ignore data references. */
2760 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2761 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2763 /* Otherwise we probably have a jump table reloc for
2764 a switch statement or some other reference to a
   function.  */
   /* Resolve the destination address: symbol value + addend.  */
2769 val = h->root.u.def.value;
2771 val = sym->st_value;
2772 val += irela->r_addend;
2776 struct function_info *fun;
   /* A non-zero addend targets mid-function; record it with a
      fake local symbol so the function table has an entry.  */
2778 if (irela->r_addend != 0)
2780 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2783 fake->st_value = val;
2785 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2789 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2791 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2794 if (irela->r_addend != 0
2795 && fun->u.sym != sym)
   /* Call-graph pass: link a call_info edge from the calling
      function to the destination function.  */
2800 caller = find_function (sec, irela->r_offset, info);
2803 callee = bfd_malloc (sizeof *callee);
2807 callee->fun = find_function (sym_sec, val, info);
2808 if (callee->fun == NULL)
2810 callee->is_tail = !is_call;
2811 callee->is_pasted = FALSE;
2812 callee->broken_cycle = FALSE;
2813 callee->priority = priority;
   /* Non-branch references don't contribute to call counts.  */
2814 callee->count = nonbranch? 0 : 1;
2815 if (callee->fun->last_caller != sec)
2817 callee->fun->last_caller = sec;
2818 callee->fun->call_count += 1;
2820 if (!insert_callee (caller, callee))
2823 && !callee->fun->is_func
2824 && callee->fun->stack == 0)
2826 /* This is either a tail call or a branch from one part of
2827 the function to another, ie. hot/cold section. If the
2828 destination has been called by some other function then
2829 it is a separate function. We also assume that functions
2830 are not split across input files. */
2831 if (sec->owner != sym_sec->owner)
2833 callee->fun->start = NULL;
2834 callee->fun->is_func = TRUE;
2836 else if (callee->fun->start == NULL)
   /* First branch into this piece: chain it to the root of the
      calling function, unless that would self-loop.  */
2838 struct function_info *caller_start = caller;
2839 while (caller_start->start)
2840 caller_start = caller_start->start;
2842 if (caller_start != callee->fun)
2843 callee->fun->start = caller_start;
2847 struct function_info *callee_start;
2848 struct function_info *caller_start;
2849 callee_start = callee->fun;
2850 while (callee_start->start)
2851 callee_start = callee_start->start;
2852 caller_start = caller;
2853 while (caller_start->start)
2854 caller_start = caller_start->start;
   /* Branched into from two different functions: it must be a
      real function in its own right.  */
2855 if (caller_start != callee_start)
2857 callee->fun->start = NULL;
2858 callee->fun->is_func = TRUE;
2867 /* Handle something like .init or .fini, which has a piece of a function.
2868 These sections are pasted together to form a single function. */
/* Record function info for SEC, a section holding only a piece of a
   function (e.g. .init or .fini).  A fake symbol covering the section
   is installed via maybe_insert_function, and an is_pasted call edge
   is added from the function piece that precedes SEC in output order.
   NOTE(review): this extract elides some lines of the function;
   comments describe only the visible code.  */
2871 pasted_function (asection *sec)
2873 struct bfd_link_order *l;
2874 struct _spu_elf_section_data *sec_data;
2875 struct spu_elf_stack_info *sinfo;
2876 Elf_Internal_Sym *fake;
2877 struct function_info *fun, *fun_start;
/* Fake a zeroed symbol spanning the whole section.  */
2879 fake = bfd_zmalloc (sizeof (*fake));
2883 fake->st_size = sec->size;
2885 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2886 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2890 /* Find a function immediately preceding this section. */
2892 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2894 if (l->u.indirect.section == sec)
2896 if (fun_start != NULL)
2898 struct call_info *callee = bfd_malloc (sizeof *callee);
/* Link this piece to the preceding piece with a pasted tail call.  */
2902 fun->start = fun_start;
2904 callee->is_tail = TRUE;
2905 callee->is_pasted = TRUE;
2906 callee->broken_cycle = FALSE;
2907 callee->priority = 0;
2909 if (!insert_callee (fun_start, callee))
/* Remember the last function seen in each indirect link_order section,
   so that when we reach SEC we know which piece precedes it.  */
2915 if (l->type == bfd_indirect_link_order
2916 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2917 && (sinfo = sec_data->u.i.stack_info) != NULL
2918 && sinfo->num_fun != 0)
2919 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2922 /* Don't return an error if we did not find a function preceding this
2923 section. The section may have incorrect flags. */
2927 /* Map address ranges in code sections to functions. */
/* Build function address-range info for every SPU input bfd:
   read the full symbol table (locals AND globals), select defined
   STT_FUNC/STT_NOTYPE symbols in interesting code sections, sort
   them, install properly typed functions, then plug remaining gaps
   using relocations, global symbols, zero-size extension, and
   pasted_function for symbol-less sections.
   NOTE(review): many lines (braces, returns, error paths) are elided
   in this extract; comments cover only the visible code.  */
2930 discover_functions (struct bfd_link_info *info)
2934 Elf_Internal_Sym ***psym_arr;
2935 asection ***sec_arr;
2936 bfd_boolean gaps = FALSE;
/* First pass counts input bfds so we can size the per-bfd arrays.  */
2939 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2942 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2943 if (psym_arr == NULL)
2945 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2946 if (sec_arr == NULL)
2949 for (ibfd = info->input_bfds, bfd_idx = 0;
2951 ibfd = ibfd->link_next, bfd_idx++)
2953 extern const bfd_target bfd_elf32_spu_vec;
2954 Elf_Internal_Shdr *symtab_hdr;
2957 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2958 asection **psecs, **p;
/* Only SPU ELF input files participate.  */
2960 if (ibfd->xvec != &bfd_elf32_spu_vec)
2963 /* Read all the symbols. */
2964 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2965 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2969 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2970 if (interesting_section (sec))
2978 if (symtab_hdr->contents != NULL)
2980 /* Don't use cached symbols since the generic ELF linker
2981 code only reads local symbols, and we need globals too. */
2982 free (symtab_hdr->contents);
2983 symtab_hdr->contents = NULL;
2985 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2987 symtab_hdr->contents = (void *) syms;
2991 /* Select defined function symbols that are going to be output. */
2992 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2995 psym_arr[bfd_idx] = psyms;
2996 psecs = bfd_malloc (symcount * sizeof (*psecs));
2999 sec_arr[bfd_idx] = psecs;
3000 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
3001 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
3002 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3006 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3007 if (s != NULL && interesting_section (s))
3010 symcount = psy - psyms;
3013 /* Sort them by section and offset within section. */
3014 sort_syms_syms = syms;
3015 sort_syms_psecs = psecs;
3016 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3018 /* Now inspect the function symbols. */
3019 for (psy = psyms; psy < psyms + symcount; )
3021 asection *s = psecs[*psy - syms];
3022 Elf_Internal_Sym **psy2;
/* Count the run of sorted symbols belonging to section S, then size
   its per-section stack_info array accordingly.  */
3024 for (psy2 = psy; ++psy2 < psyms + symcount; )
3025 if (psecs[*psy2 - syms] != s)
3028 if (!alloc_stack_info (s, psy2 - psy))
3033 /* First install info about properly typed and sized functions.
3034 In an ideal world this will cover all code sections, except
3035 when partitioning functions into hot and cold sections,
3036 and the horrible pasted together .init and .fini functions. */
3037 for (psy = psyms; psy < psyms + symcount; ++psy)
3040 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3042 asection *s = psecs[sy - syms];
3043 if (!maybe_insert_function (s, sy, FALSE, TRUE))
3048 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3049 if (interesting_section (sec))
3050 gaps |= check_function_ranges (sec, info);
3055 /* See if we can discover more function symbols by looking at
3057 for (ibfd = info->input_bfds, bfd_idx = 0;
3059 ibfd = ibfd->link_next, bfd_idx++)
3063 if (psym_arr[bfd_idx] == NULL)
3066 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3067 if (!mark_functions_via_relocs (sec, info, FALSE))
3071 for (ibfd = info->input_bfds, bfd_idx = 0;
3073 ibfd = ibfd->link_next, bfd_idx++)
3075 Elf_Internal_Shdr *symtab_hdr;
3077 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3080 if ((psyms = psym_arr[bfd_idx]) == NULL)
3083 psecs = sec_arr[bfd_idx];
3085 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3086 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3089 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3090 if (interesting_section (sec))
3091 gaps |= check_function_ranges (sec, info);
3095 /* Finally, install all globals. */
3096 for (psy = psyms; (sy = *psy) != NULL; ++psy)
3100 s = psecs[sy - syms];
3102 /* Global syms might be improperly typed functions. */
3103 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3104 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3106 if (!maybe_insert_function (s, sy, FALSE, FALSE))
3112 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3114 extern const bfd_target bfd_elf32_spu_vec;
3117 if (ibfd->xvec != &bfd_elf32_spu_vec)
3120 /* Some of the symbols we've installed as marking the
3121 beginning of functions may have a size of zero. Extend
3122 the range of such functions to the beginning of the
3123 next symbol of interest. */
3124 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3125 if (interesting_section (sec))
3127 struct _spu_elf_section_data *sec_data;
3128 struct spu_elf_stack_info *sinfo;
3130 sec_data = spu_elf_section_data (sec);
3131 sinfo = sec_data->u.i.stack_info;
3132 if (sinfo != NULL && sinfo->num_fun != 0)
3135 bfd_vma hi = sec->size;
/* Walk functions backwards: each one's hi bound is the next one's lo.  */
3137 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3139 sinfo->fun[fun_idx].hi = hi;
3140 hi = sinfo->fun[fun_idx].lo;
3143 sinfo->fun[0].lo = 0;
3145 /* No symbols in this section. Must be .init or .fini
3146 or something similar. */
3147 else if (!pasted_function (sec))
/* Release the per-bfd symbol pointer and section arrays.  */
3153 for (ibfd = info->input_bfds, bfd_idx = 0;
3155 ibfd = ibfd->link_next, bfd_idx++)
3157 if (psym_arr[bfd_idx] == NULL)
3160 free (psym_arr[bfd_idx]);
3161 free (sec_arr[bfd_idx]);
3170 /* Iterate over all function_info we have collected, calling DOIT on
3171 each node if ROOT_ONLY is false. Only call DOIT on root nodes
/* Iterate over every collected function_info in every SPU input bfd's
   sections, invoking DOIT (with INFO and PARAM) on each node; when
   ROOT_ONLY is true, nodes marked non_root are skipped.  Stops (and
   presumably reports failure -- return elided here) when DOIT fails.  */
3175 for_each_node (bfd_boolean (*doit) (struct function_info *,
3176 struct bfd_link_info *,
3178 struct bfd_link_info *info,
3184 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3186 extern const bfd_target bfd_elf32_spu_vec;
/* Only SPU ELF input files carry stack_info.  */
3189 if (ibfd->xvec != &bfd_elf32_spu_vec)
3192 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3194 struct _spu_elf_section_data *sec_data;
3195 struct spu_elf_stack_info *sinfo;
3197 if ((sec_data = spu_elf_section_data (sec)) != NULL
3198 && (sinfo = sec_data->u.i.stack_info) != NULL)
3201 for (i = 0; i < sinfo->num_fun; ++i)
3202 if (!root_only || !sinfo->fun[i].non_root)
3203 if (!doit (&sinfo->fun[i], info, param))
3211 /* Transfer call info attached to struct function_info entries for
3212 all of a given function's sections to the first entry. */
/* for_each_node callback: move all call_info entries attached to FUN,
   a non-first piece of a split (hot/cold) function, onto the function's
   first piece, found by chasing the ->start chain.  Clears FUN's own
   call_list afterwards.  */
3215 transfer_calls (struct function_info *fun,
3216 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3217 void *param ATTRIBUTE_UNUSED)
3219 struct function_info *start = fun->start;
3223 struct call_info *call, *call_next;
/* Walk to the root piece of the function.  */
3225 while (start->start != NULL)
3226 start = start->start;
3227 for (call = fun->call_list; call != NULL; call = call_next)
3229 call_next = call->next;
3230 if (!insert_callee (start, call))
3233 fun->call_list = NULL;
3238 /* Mark nodes in the call graph that are called by some other node. */
/* for_each_node callback: recursively set non_root on every function
   reachable via call edges, leaving only true call-graph roots with
   non_root clear.  */
3241 mark_non_root (struct function_info *fun,
3242 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3243 void *param ATTRIBUTE_UNUSED)
3245 struct call_info *call;
3250 for (call = fun->call_list; call; call = call->next)
3252 call->fun->non_root = TRUE;
3253 mark_non_root (call->fun, 0, 0);
3258 /* Remove cycles from the call graph. Set depth of nodes. */
/* for_each_node callback: depth-first walk that flags back edges
   (broken_cycle) so later traversals terminate, and records call
   depth.  PARAM points at the current depth on entry and receives the
   maximum depth reached on return.  Uses fun->marking to detect nodes
   on the active DFS path and fun->visit2 to avoid re-walking.  */
3261 remove_cycles (struct function_info *fun,
3262 struct bfd_link_info *info,
3265 struct call_info **callp, *call;
3266 unsigned int depth = *(unsigned int *) param;
3267 unsigned int max_depth = depth;
3271 fun->marking = TRUE;
3273 callp = &fun->call_list;
3274 while ((call = *callp) != NULL)
/* Pasted pieces are the same function, so don't add to the depth.  */
3276 call->max_depth = depth + !call->is_pasted;
3277 if (!call->fun->visit2)
3279 if (!remove_cycles (call->fun, info, &call->max_depth))
3281 if (max_depth < call->max_depth)
3282 max_depth = call->max_depth;
/* Callee is on the current DFS path: this edge closes a cycle.  */
3284 else if (call->fun->marking)
3286 struct spu_link_hash_table *htab = spu_hash_table (info);
3288 if (!htab->params->auto_overlay
3289 && htab->params->stack_analysis)
3291 const char *f1 = func_name (fun);
3292 const char *f2 = func_name (call->fun);
3294 info->callbacks->info (_("Stack analysis will ignore the call "
3299 call->broken_cycle = TRUE;
3301 callp = &call->next;
3303 fun->marking = FALSE;
3304 *(unsigned int *) param = max_depth;
3308 /* Check that we actually visited all nodes in remove_cycles. If we
3309 didn't, then there is some cycle in the call graph not attached to
3310 any root node. Arbitrarily choose a node in the cycle as a new
3311 root and break the cycle. */
/* for_each_node callback: a node not visited by remove_cycles belongs
   to a cycle with no root.  Promote it to a root (clear non_root),
   reset the depth in PARAM, and break that cycle via remove_cycles.  */
3314 mark_detached_root (struct function_info *fun,
3315 struct bfd_link_info *info,
3320 fun->non_root = FALSE;
3321 *(unsigned int *) param = 0;
3322 return remove_cycles (fun, info, param);
3325 /* Populate call_list for each function. */
/* Build the whole-program call graph: populate call_list for every
   function via relocation scanning, merge split-function call info,
   mark roots, then remove cycles (including any in detached
   subgraphs).  Returns the result of the final traversal.  */
3328 build_call_tree (struct bfd_link_info *info)
3333 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3335 extern const bfd_target bfd_elf32_spu_vec;
3338 if (ibfd->xvec != &bfd_elf32_spu_vec)
3341 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3342 if (!mark_functions_via_relocs (sec, info, TRUE))
3346 /* Transfer call info from hot/cold section part of function
3348 if (!spu_hash_table (info)->params->auto_overlay
3349 && !for_each_node (transfer_calls, info, 0, FALSE))
3352 /* Find the call graph root(s). */
3353 if (!for_each_node (mark_non_root, info, 0, FALSE))
3356 /* Remove cycles from the call graph. We start from the root node(s)
3357 so that we break cycles in a reasonable place. */
3359 if (!for_each_node (remove_cycles, info, &depth, TRUE))
3362 return for_each_node (mark_detached_root, info, &depth, FALSE);
3365 /* qsort predicate to sort calls by priority, max_depth then count. */
/* qsort comparator for struct call_info *: descending priority, then
   descending max_depth, then descending count; ties broken by pointer
   address so the sort is stable across equal keys.  */
3368 sort_calls (const void *a, const void *b)
3370 struct call_info *const *c1 = a;
3371 struct call_info *const *c2 = b;
3374 delta = (*c2)->priority - (*c1)->priority;
3378 delta = (*c2)->max_depth - (*c1)->max_depth;
3382 delta = (*c2)->count - (*c1)->count;
/* Fall back to address order for a deterministic result.  */
3386 return (char *) c1 - (char *) c2;
3390 unsigned int max_overlay_size;
3393 /* Set linker_mark and gc_mark on any sections that we will put in
3394 overlays. These flags are used by the generic ELF linker, but we
3395 won't be continuing on to bfd_elf_final_link so it is OK to use
3396 them. linker_mark is clear before we get here. Set segment_mark
3397 on sections that are part of a pasted function (excluding the last
3400 Set up function rodata section if --overlay-rodata. We don't
3401 currently include merged string constant rodata sections since
3403 Sort the call graph so that the deepest nodes will be visited
/* for_each_node callback: mark FUN's text (and optionally matching
   rodata) sections as overlay candidates via linker_mark/gc_mark,
   track the largest candidate size in mos_param->max_overlay_size,
   sort FUN's outgoing calls, flag pasted-function chains with
   segment_mark, and recurse into callees.  Entry-point code and
   .ovl.init are excluded at the end.
   NOTE(review): several lines are elided in this extract; comments
   describe only the visible code.  */
3407 mark_overlay_section (struct function_info *fun,
3408 struct bfd_link_info *info,
3411 struct call_info *call;
3413 struct _mos_param *mos_param = param;
3414 struct spu_link_hash_table *htab = spu_hash_table (info);
/* For soft-icache only .text.ia.*, .init and .fini sections are
   eligible unless non_ia_text is set.  */
3420 if (!fun->sec->linker_mark
3421 && (htab->params->ovly_flavour != ovly_soft_icache
3422 || htab->params->non_ia_text
3423 || strncmp (fun->sec->name, ".text.ia.", 9) == 0
3424 || strcmp (fun->sec->name, ".init") == 0
3425 || strcmp (fun->sec->name, ".fini") == 0))
3429 fun->sec->linker_mark = 1;
3430 fun->sec->gc_mark = 1;
3431 fun->sec->segment_mark = 0;
3432 /* Ensure SEC_CODE is set on this text section (it ought to
3433 be!), and SEC_CODE is clear on rodata sections. We use
3434 this flag to differentiate the two overlay section types. */
3435 fun->sec->flags |= SEC_CODE;
3437 size = fun->sec->size;
3438 if (htab->params->auto_overlay & OVERLAY_RODATA)
3442 /* Find the rodata section corresponding to this function's
3444 if (strcmp (fun->sec->name, ".text") == 0)
3446 name = bfd_malloc (sizeof (".rodata"));
3449 memcpy (name, ".rodata", sizeof (".rodata"));
/* Map .text.<suffix> to .rodata.<suffix>.  */
3451 else if (strncmp (fun->sec->name, ".text.", 6) == 0)
3453 size_t len = strlen (fun->sec->name);
3454 name = bfd_malloc (len + 3);
3457 memcpy (name, ".rodata", sizeof (".rodata"));
3458 memcpy (name + 7, fun->sec->name + 5, len - 4));
3460 else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
3462 size_t len = strlen (fun->sec->name) + 1;
3463 name = bfd_malloc (len);
3466 memcpy (name, fun->sec->name, len);
/* Prefer a rodata section from the same section group, falling back
   to a by-name lookup in the owning bfd.  */
3472 asection *rodata = NULL;
3473 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3474 if (group_sec == NULL)
3475 rodata = bfd_get_section_by_name (fun->sec->owner, name);
3477 while (group_sec != NULL && group_sec != fun->sec)
3479 if (strcmp (group_sec->name, name) == 0)
3484 group_sec = elf_section_data (group_sec)->next_in_group;
3486 fun->rodata = rodata;
3489 size += fun->rodata->size;
/* Don't bundle rodata if text+rodata exceed the icache line size.  */
3490 if (htab->params->line_size != 0
3491 && size > htab->params->line_size)
3493 size -= fun->rodata->size;
3498 fun->rodata->linker_mark = 1;
3499 fun->rodata->gc_mark = 1;
3500 fun->rodata->flags &= ~SEC_CODE;
3506 if (mos_param->max_overlay_size < size)
3507 mos_param->max_overlay_size = size;
/* Sort FUN's calls (priority/depth/count) so deeper callees are
   visited first; the list is rebuilt from the sorted array.  */
3510 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3515 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3519 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3520 calls[count++] = call;
3522 qsort (calls, count, sizeof (*calls), sort_calls);
3524 fun->call_list = NULL;
3528 calls[count]->next = fun->call_list;
3529 fun->call_list = calls[count];
3534 for (call = fun->call_list; call != NULL; call = call->next)
3536 if (call->is_pasted)
3538 /* There can only be one is_pasted call per function_info. */
3539 BFD_ASSERT (!fun->sec->segment_mark);
3540 fun->sec->segment_mark = 1;
3542 if (!call->broken_cycle
3543 && !mark_overlay_section (call->fun, info, param))
3547 /* Don't put entry code into an overlay. The overlay manager needs
3548 a stack! Also, don't mark .ovl.init as an overlay. */
3549 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3550 == info->output_bfd->start_address
3551 || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
3553 fun->sec->linker_mark = 0;
3554 if (fun->rodata != NULL)
3555 fun->rodata->linker_mark = 0;
3560 /* If non-zero then unmark functions called from those within sections
3561 that we need to unmark. Unfortunately this isn't reliable since the
3562 call graph cannot know the destination of function pointer calls. */
3563 #define RECURSE_UNMARK 0
3566 asection *exclude_input_section;
3567 asection *exclude_output_section;
3568 unsigned long clearing;
3571 /* Undo some of mark_overlay_section's work. */
/* for_each_node callback: clear linker_mark on FUN's sections when FUN
   lives in (or outputs to) the excluded input/output section recorded
   in the _uos_param, then recurse into callees.  With RECURSE_UNMARK
   set, everything called (transitively) while inside an excluded
   section is unmarked too, via the clearing counter.  */
3574 unmark_overlay_section (struct function_info *fun,
3575 struct bfd_link_info *info,
3578 struct call_info *call;
3579 struct _uos_param *uos_param = param;
3580 unsigned int excluded = 0;
3588 if (fun->sec == uos_param->exclude_input_section
3589 || fun->sec->output_section == uos_param->exclude_output_section)
3593 uos_param->clearing += excluded;
3595 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3597 fun->sec->linker_mark = 0;
3599 fun->rodata->linker_mark = 0;
3602 for (call = fun->call_list; call != NULL; call = call->next)
3603 if (!call->broken_cycle
3604 && !unmark_overlay_section (call->fun, info, param))
/* Restore the nesting count on the way back out.  */
3608 uos_param->clearing -= excluded;
3613 unsigned int lib_size;
3614 asection **lib_sections;
3617 /* Add sections we have marked as belonging to overlays to an array
3618 for consideration as non-overlay sections. The array consists of
3619 pairs of sections, (text,rodata), for functions in the call graph. */
/* for_each_node callback: append FUN's marked text section (and its
   rodata partner, if also marked) to lib_param->lib_sections as a
   (text,rodata) pair, provided the combined size fits within the
   remaining lib_size budget.  gc_mark is cleared on collected sections
   so they are taken at most once; recursion covers callees.  */
3622 collect_lib_sections (struct function_info *fun,
3623 struct bfd_link_info *info,
3626 struct _cl_param *lib_param = param;
3627 struct call_info *call;
/* Skip sections not marked as overlay candidates, already collected,
   or part of a pasted function.  */
3634 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3637 size = fun->sec->size;
3639 size += fun->rodata->size;
3641 if (size <= lib_param->lib_size)
3643 *lib_param->lib_sections++ = fun->sec;
3644 fun->sec->gc_mark = 0;
3645 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3647 *lib_param->lib_sections++ = fun->rodata;
3648 fun->rodata->gc_mark = 0;
/* NULL marks a pair with no rodata partner.  */
3651 *lib_param->lib_sections++ = NULL;
3654 for (call = fun->call_list; call != NULL; call = call->next)
3655 if (!call->broken_cycle)
3656 collect_lib_sections (call->fun, info, param);
3661 /* qsort predicate to sort sections by call count. */
/* qsort comparator for (text,rodata) section pairs: orders by total
   call_count of the functions in each text section, most-called
   first (delta is negated for s1, added for s2).  */
3664 sort_lib (const void *a, const void *b)
3666 asection *const *s1 = a;
3667 asection *const *s2 = b;
3668 struct _spu_elf_section_data *sec_data;
3669 struct spu_elf_stack_info *sinfo;
3673 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3674 && (sinfo = sec_data->u.i.stack_info) != NULL)
3677 for (i = 0; i < sinfo->num_fun; ++i)
3678 delta -= sinfo->fun[i].call_count;
3681 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3682 && (sinfo = sec_data->u.i.stack_info) != NULL)
3685 for (i = 0; i < sinfo->num_fun; ++i)
3686 delta += sinfo->fun[i].call_count;
3695 /* Remove some sections from those marked to be in overlays. Choose
3696 those that are called from many places, likely library functions. */
/* Pull heavily-called ("library-like") sections out of the overlay set
   so long as they fit in LIB_SIZE bytes of non-overlay space.
   Candidate (text,rodata) pairs are gathered, sorted most-called
   first, and greedily accepted; the cost of each acceptance includes
   any overlay call stubs its calls would still need, tracked on a
   dummy_caller call list.  Returns the remaining lib_size, or
   (unsigned int) -1 on allocation failure.
   NOTE(review): some lines are elided in this extract.  */
3699 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3702 asection **lib_sections;
3703 unsigned int i, lib_count;
3704 struct _cl_param collect_lib_param;
3705 struct function_info dummy_caller;
3706 struct spu_link_hash_table *htab;
3708 memset (&dummy_caller, 0, sizeof (dummy_caller));
/* Count candidate sections to size the pair array.  */
3710 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3712 extern const bfd_target bfd_elf32_spu_vec;
3715 if (ibfd->xvec != &bfd_elf32_spu_vec)
3718 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3719 if (sec->linker_mark
3720 && sec->size < lib_size
3721 && (sec->flags & SEC_CODE) != 0)
3724 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3725 if (lib_sections == NULL)
3726 return (unsigned int) -1;
3727 collect_lib_param.lib_size = lib_size;
3728 collect_lib_param.lib_sections = lib_sections;
3729 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3731 return (unsigned int) -1;
3732 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3734 /* Sort sections so that those with the most calls are first. */
3736 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3738 htab = spu_hash_table (info);
3739 for (i = 0; i < lib_count; i++)
3741 unsigned int tmp, stub_size;
3743 struct _spu_elf_section_data *sec_data;
3744 struct spu_elf_stack_info *sinfo;
3746 sec = lib_sections[2 * i];
3747 /* If this section is OK, its size must be less than lib_size. */
3749 /* If it has a rodata section, then add that too. */
3750 if (lib_sections[2 * i + 1])
3751 tmp += lib_sections[2 * i + 1]->size;
3752 /* Add any new overlay call stubs needed by the section. */
3755 && (sec_data = spu_elf_section_data (sec)) != NULL
3756 && (sinfo = sec_data->u.i.stack_info) != NULL)
3759 struct call_info *call;
3761 for (k = 0; k < sinfo->num_fun; ++k)
3762 for (call = sinfo->fun[k].call_list; call; call = call->next)
3763 if (call->fun->sec->linker_mark)
/* Only count a stub once per distinct callee.  */
3765 struct call_info *p;
3766 for (p = dummy_caller.call_list; p; p = p->next)
3767 if (p->fun == call->fun)
3770 stub_size += ovl_stub_size (htab->params);
3773 if (tmp + stub_size < lib_size)
3775 struct call_info **pp, *p;
3777 /* This section fits. Mark it as non-overlay. */
3778 lib_sections[2 * i]->linker_mark = 0;
3779 if (lib_sections[2 * i + 1])
3780 lib_sections[2 * i + 1]->linker_mark = 0;
3781 lib_size -= tmp + stub_size;
3782 /* Call stubs to the section we just added are no longer
3784 pp = &dummy_caller.call_list;
3785 while ((p = *pp) != NULL)
3786 if (!p->fun->sec->linker_mark)
/* Reclaim the budget previously charged for this stub.  */
3788 lib_size += ovl_stub_size (htab->params);
3794 /* Add new call stubs to dummy_caller. */
3795 if ((sec_data = spu_elf_section_data (sec)) != NULL
3796 && (sinfo = sec_data->u.i.stack_info) != NULL)
3799 struct call_info *call;
3801 for (k = 0; k < sinfo->num_fun; ++k)
3802 for (call = sinfo->fun[k].call_list;
3805 if (call->fun->sec->linker_mark)
3807 struct call_info *callee;
3808 callee = bfd_malloc (sizeof (*callee));
3810 return (unsigned int) -1;
3812 if (!insert_callee (&dummy_caller, callee))
/* Free the bookkeeping call list and restore gc_mark on the
   candidates collect_lib_sections cleared.  */
3818 while (dummy_caller.call_list != NULL)
3820 struct call_info *call = dummy_caller.call_list;
3821 dummy_caller.call_list = call->next;
3824 for (i = 0; i < 2 * lib_count; i++)
3825 if (lib_sections[i])
3826 lib_sections[i]->gc_mark = 1;
3827 free (lib_sections);
3831 /* Build an array of overlay sections. The deepest node's section is
3832 added first, then its parent node's section, then everything called
3833 from the parent section. The idea being to group sections to
3834 minimise calls between different overlays. */
/* for_each_node callback: append overlay sections to the array pointed
   at by PARAM, deepest callees first, as (text,rodata,NULL-terminated)
   groups.  Pasted continuation sections are not added separately --
   only the head of a pasted chain goes in the array, and the rest get
   gc_mark cleared so they are not revisited.  */
3837 collect_overlays (struct function_info *fun,
3838 struct bfd_link_info *info,
3841 struct call_info *call;
3842 bfd_boolean added_fun;
3843 asection ***ovly_sections = param;
/* Visit non-pasted callees before FUN itself, so the deepest node's
   section lands first in the array.  */
3849 for (call = fun->call_list; call != NULL; call = call->next)
3850 if (!call->is_pasted && !call->broken_cycle)
3852 if (!collect_overlays (call->fun, info, ovly_sections))
3858 if (fun->sec->linker_mark && fun->sec->gc_mark)
3860 fun->sec->gc_mark = 0;
3861 *(*ovly_sections)++ = fun->sec;
3862 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3864 fun->rodata->gc_mark = 0;
3865 *(*ovly_sections)++ = fun->rodata;
3868 *(*ovly_sections)++ = NULL;
3871 /* Pasted sections must stay with the first section. We don't
3872 put pasted sections in the array, just the first section.
3873 Mark subsequent sections as already considered. */
3874 if (fun->sec->segment_mark)
3876 struct function_info *call_fun = fun;
3879 for (call = call_fun->call_list; call != NULL; call = call->next)
3880 if (call->is_pasted)
3882 call_fun = call->fun;
3883 call_fun->sec->gc_mark = 0;
3884 if (call_fun->rodata)
3885 call_fun->rodata->gc_mark = 0;
3891 while (call_fun->sec->segment_mark);
/* Now walk the remaining (pasted/cyclic) callees.  */
3895 for (call = fun->call_list; call != NULL; call = call->next)
3896 if (!call->broken_cycle
3897 && !collect_overlays (call->fun, info, ovly_sections))
3902 struct _spu_elf_section_data *sec_data;
3903 struct spu_elf_stack_info *sinfo;
/* Also sweep any sibling functions sharing FUN's section.  */
3905 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3906 && (sinfo = sec_data->u.i.stack_info) != NULL)
3909 for (i = 0; i < sinfo->num_fun; ++i)
3910 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
/* Parameter block for sum_stack: overall_stack accumulates the worst
   whole-program stack depth; emit_stack_syms requests creation of
   __stack_* absolute symbols.  (A cum_stack field is elided from this
   extract -- see sum_stack's uses.)  */
3918 struct _sum_stack_param {
3920 size_t overall_stack;
3921 bfd_boolean emit_stack_syms;
3924 /* Descend the call graph for FUN, accumulating total stack required. */
/* for_each_node callback: compute FUN's cumulative stack use by
   recursing over its callees (caller frame added for normal calls but
   not tail calls), store it in fun->stack and the param's cum_stack,
   update overall_stack, optionally print per-function stack reports,
   and optionally emit an absolute __stack_<name> symbol holding the
   cumulative figure.
   NOTE(review): some lines are elided in this extract.  */
3927 sum_stack (struct function_info *fun,
3928 struct bfd_link_info *info,
3931 struct call_info *call;
3932 struct function_info *max;
3933 size_t stack, cum_stack;
3935 bfd_boolean has_call;
3936 struct _sum_stack_param *sum_stack_param = param;
3937 struct spu_link_hash_table *htab;
3939 cum_stack = fun->stack;
3940 sum_stack_param->cum_stack = cum_stack;
3946 for (call = fun->call_list; call; call = call->next)
3948 if (call->broken_cycle)
3950 if (!call->is_pasted)
3952 if (!sum_stack (call->fun, info, sum_stack_param))
3954 stack = sum_stack_param->cum_stack;
3955 /* Include caller stack for normal calls, don't do so for
3956 tail calls. fun->stack here is local stack usage for
3958 if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3959 stack += fun->stack;
/* Track the worst callee path (max).  */
3960 if (cum_stack < stack)
3967 sum_stack_param->cum_stack = cum_stack;
3969 /* Now fun->stack holds cumulative stack. */
3970 fun->stack = cum_stack;
3974 && sum_stack_param->overall_stack < cum_stack)
3975 sum_stack_param->overall_stack = cum_stack;
3977 htab = spu_hash_table (info);
3978 if (htab->params->auto_overlay)
3981 f1 = func_name (fun);
3982 if (htab->params->stack_analysis)
3985 info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
3986 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
3987 f1, (bfd_vma) stack, (bfd_vma) cum_stack);
3991 info->callbacks->minfo (_("  calls:\n"));
3992 for (call = fun->call_list; call; call = call->next)
3993 if (!call->is_pasted && !call->broken_cycle)
3995 const char *f2 = func_name (call->fun);
/* "*" flags the deepest callee, "t" flags a tail call.  */
3996 const char *ann1 = call->fun == max ? "*" : " ";
3997 const char *ann2 = call->is_tail ? "t" : " ";
3999 info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
4004 if (sum_stack_param->emit_stack_syms)
4006 char *name = bfd_malloc (18 + strlen (f1));
4007 struct elf_link_hash_entry *h;
/* Globals get __stack_<name>; locals are qualified with the section
   id to keep the symbol unique.  */
4012 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
4013 sprintf (name, "__stack_%s", f1);
4015 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
4017 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
4020 && (h->root.type == bfd_link_hash_new
4021 || h->root.type == bfd_link_hash_undefined
4022 || h->root.type == bfd_link_hash_undefweak))
4024 h->root.type = bfd_link_hash_defined;
4025 h->root.u.def.section = bfd_abs_section_ptr;
4026 h->root.u.def.value = cum_stack;
4031 h->ref_regular_nonweak = 1;
4032 h->forced_local = 1;
4040 /* SEC is part of a pasted function. Return the call_info for the
4041 next section of this function. */
/* SEC is part of a pasted function.  Scan the call lists of SEC's
   functions for the is_pasted edge and return it, i.e. the call_info
   leading to the next piece of the function.  */
4043 static struct call_info *
4044 find_pasted_call (asection *sec)
4046 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4047 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4048 struct call_info *call;
4051 for (k = 0; k < sinfo->num_fun; ++k)
4052 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4053 if (call->is_pasted)
4059 /* qsort predicate to sort bfds by file name. */
/* qsort comparator: order bfds lexicographically by file name, so
   duplicate names can be detected by scanning adjacent entries.  */
4062 sort_bfds (const void *a, const void *b)
4064 bfd *const *abfd1 = a;
4065 bfd *const *abfd2 = b;
4067 return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
/* Emit linker-script input statements for overlay OVLYNUM to SCRIPT:
   first the text sections (with any pasted continuations), then the
   corresponding rodata sections.  Sections are selected from
   ovly_sections[] pairs whose ovly_map entry matches OVLYNUM,
   starting at BASE.  Each line is "archive<sep>object (secname)".
   NOTE(review): the return value / error handling lines are elided
   in this extract.  */
4071 print_one_overlay_section (FILE *script,
4074 unsigned int ovlynum,
4075 unsigned int *ovly_map,
4076 asection **ovly_sections,
4077 struct bfd_link_info *info)
/* Pass 1: text sections.  */
4081 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4083 asection *sec = ovly_sections[2 * j];
4085 if (fprintf (script, "   %s%c%s (%s)\n",
4086 (sec->owner->my_archive != NULL
4087 ? sec->owner->my_archive->filename : ""),
4088 info->path_separator,
4089 sec->owner->filename,
/* Follow the pasted chain so every piece of the function is listed.  */
4092 if (sec->segment_mark)
4094 struct call_info *call = find_pasted_call (sec);
4095 while (call != NULL)
4097 struct function_info *call_fun = call->fun;
4098 sec = call_fun->sec;
4099 if (fprintf (script, "   %s%c%s (%s)\n",
4100 (sec->owner->my_archive != NULL
4101 ? sec->owner->my_archive->filename : ""),
4102 info->path_separator,
4103 sec->owner->filename,
4106 for (call = call_fun->call_list; call; call = call->next)
4107 if (call->is_pasted)
/* Pass 2: rodata partners (may be NULL).  */
4113 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4115 asection *sec = ovly_sections[2 * j + 1];
4117 && fprintf (script, "   %s%c%s (%s)\n",
4118 (sec->owner->my_archive != NULL
4119 ? sec->owner->my_archive->filename : ""),
4120 info->path_separator,
4121 sec->owner->filename,
4125 sec = ovly_sections[2 * j];
4126 if (sec->segment_mark)
4128 struct call_info *call = find_pasted_call (sec);
4129 while (call != NULL)
4131 struct function_info *call_fun = call->fun;
4132 sec = call_fun->rodata;
4134 && fprintf (script, "   %s%c%s (%s)\n",
4135 (sec->owner->my_archive != NULL
4136 ? sec->owner->my_archive->filename : ""),
4137 info->path_separator,
4138 sec->owner->filename,
4141 for (call = call_fun->call_list; call; call = call->next)
4142 if (call->is_pasted)
4151 /* Handle --auto-overlay. */
4154 spu_elf_auto_overlay (struct bfd_link_info *info)
4158 struct elf_segment_map *m;
4159 unsigned int fixed_size, lo, hi;
4160 unsigned int reserved;
4161 struct spu_link_hash_table *htab;
4162 unsigned int base, i, count, bfd_count;
4163 unsigned int region, ovlynum;
4164 asection **ovly_sections, **ovly_p;
4165 unsigned int *ovly_map;
4167 unsigned int total_overlay_size, overlay_size;
4168 const char *ovly_mgr_entry;
4169 struct elf_link_hash_entry *h;
4170 struct _mos_param mos_param;
4171 struct _uos_param uos_param;
4172 struct function_info dummy_caller;
4174 /* Find the extents of our loadable image. */
4175 lo = (unsigned int) -1;
4177 for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
4178 if (m->p_type == PT_LOAD)
4179 for (i = 0; i < m->count; i++)
4180 if (m->sections[i]->size != 0)
4182 if (m->sections[i]->vma < lo)
4183 lo = m->sections[i]->vma;
4184 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4185 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4187 fixed_size = hi + 1 - lo;
4189 if (!discover_functions (info))
4192 if (!build_call_tree (info))
4195 htab = spu_hash_table (info);
4196 reserved = htab->params->auto_overlay_reserved;
4199 struct _sum_stack_param sum_stack_param;
4201 sum_stack_param.emit_stack_syms = 0;
4202 sum_stack_param.overall_stack = 0;
4203 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4205 reserved = (sum_stack_param.overall_stack
4206 + htab->params->extra_stack_space);
4209 /* No need for overlays if everything already fits. */
4210 if (fixed_size + reserved <= htab->local_store
4211 && htab->params->ovly_flavour != ovly_soft_icache)
4213 htab->params->auto_overlay = 0;
4217 uos_param.exclude_input_section = 0;
4218 uos_param.exclude_output_section
4219 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4221 ovly_mgr_entry = "__ovly_load";
4222 if (htab->params->ovly_flavour == ovly_soft_icache)
4223 ovly_mgr_entry = "__icache_br_handler";
4224 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4225 FALSE, FALSE, FALSE);
4227 && (h->root.type == bfd_link_hash_defined
4228 || h->root.type == bfd_link_hash_defweak)
4231 /* We have a user supplied overlay manager. */
4232 uos_param.exclude_input_section = h->root.u.def.section;
4236 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4237 builtin version to .text, and will adjust .text size. */
4238 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4241 /* Mark overlay sections, and find max overlay section size. */
4242 mos_param.max_overlay_size = 0;
4243 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4246 /* We can't put the overlay manager or interrupt routines in
4248 uos_param.clearing = 0;
4249 if ((uos_param.exclude_input_section
4250 || uos_param.exclude_output_section)
4251 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4255 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4257 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4258 if (bfd_arr == NULL)
4261 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4264 total_overlay_size = 0;
4265 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4267 extern const bfd_target bfd_elf32_spu_vec;
4269 unsigned int old_count;
4271 if (ibfd->xvec != &bfd_elf32_spu_vec)
4275 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4276 if (sec->linker_mark)
4278 if ((sec->flags & SEC_CODE) != 0)
4280 fixed_size -= sec->size;
4281 total_overlay_size += sec->size;
4283 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4284 && sec->output_section->owner == info->output_bfd
4285 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4286 fixed_size -= sec->size;
4287 if (count != old_count)
4288 bfd_arr[bfd_count++] = ibfd;
4291 /* Since the overlay link script selects sections by file name and
4292 section name, ensure that file names are unique. */
4295 bfd_boolean ok = TRUE;
4297 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4298 for (i = 1; i < bfd_count; ++i)
4299 if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4301 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4303 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4304 info->callbacks->einfo (_("%s duplicated in %s\n"),
4305 bfd_arr[i]->filename,
4306 bfd_arr[i]->my_archive->filename);
4308 info->callbacks->einfo (_("%s duplicated\n"),
4309 bfd_arr[i]->filename);
4315 info->callbacks->einfo (_("sorry, no support for duplicate "
4316 "object files in auto-overlay script\n"));
4317 bfd_set_error (bfd_error_bad_value);
4323 fixed_size += reserved;
4324 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4325 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4327 if (htab->params->ovly_flavour == ovly_soft_icache)
4329 /* Stubs in the non-icache area are bigger. */
4330 fixed_size += htab->non_ovly_stub * 16;
4331 /* Space for icache manager tables.
4332 a) Tag array, one quadword per cache line.
4333 - word 0: ia address of present line, init to zero. */
4334 fixed_size += 16 << htab->num_lines_log2;
4335 /* b) Rewrite "to" list, one quadword per cache line. */
4336 fixed_size += 16 << htab->num_lines_log2;
4337 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4338 to a power-of-two number of full quadwords) per cache line. */
4339 fixed_size += 16 << (htab->fromelem_size_log2
4340 + htab->num_lines_log2);
4341 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4346 /* Guess number of overlays. Assuming overlay buffer is on
4347 average only half full should be conservative. */
4348 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4349 / (htab->local_store - fixed_size));
4350 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4351 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4355 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4356 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4357 "size of 0x%v exceeds local store\n"),
4358 (bfd_vma) fixed_size,
4359 (bfd_vma) mos_param.max_overlay_size);
4361 /* Now see if we should put some functions in the non-overlay area. */
4362 else if (fixed_size < htab->params->auto_overlay_fixed)
4364 unsigned int max_fixed, lib_size;
4366 max_fixed = htab->local_store - mos_param.max_overlay_size;
4367 if (max_fixed > htab->params->auto_overlay_fixed)
4368 max_fixed = htab->params->auto_overlay_fixed;
4369 lib_size = max_fixed - fixed_size;
4370 lib_size = auto_ovl_lib_functions (info, lib_size);
4371 if (lib_size == (unsigned int) -1)
4373 fixed_size = max_fixed - lib_size;
4376 /* Build an array of sections, suitably sorted to place into
4378 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4379 if (ovly_sections == NULL)
4381 ovly_p = ovly_sections;
4382 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4384 count = (size_t) (ovly_p - ovly_sections) / 2;
4385 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4386 if (ovly_map == NULL)
4389 memset (&dummy_caller, 0, sizeof (dummy_caller));
4390 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4391 if (htab->params->line_size != 0)
4392 overlay_size = htab->params->line_size;
4395 while (base < count)
4397 unsigned int size = 0, rosize = 0, roalign = 0;
4399 for (i = base; i < count; i++)
4401 asection *sec, *rosec;
4402 unsigned int tmp, rotmp;
4403 unsigned int num_stubs;
4404 struct call_info *call, *pasty;
4405 struct _spu_elf_section_data *sec_data;
4406 struct spu_elf_stack_info *sinfo;
4409 /* See whether we can add this section to the current
4410 overlay without overflowing our overlay buffer. */
4411 sec = ovly_sections[2 * i];
4412 tmp = align_power (size, sec->alignment_power) + sec->size;
4414 rosec = ovly_sections[2 * i + 1];
4417 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4418 if (roalign < rosec->alignment_power)
4419 roalign = rosec->alignment_power;
4421 if (align_power (tmp, roalign) + rotmp > overlay_size)
4423 if (sec->segment_mark)
4425 /* Pasted sections must stay together, so add their
4427 pasty = find_pasted_call (sec);
4428 while (pasty != NULL)
4430 struct function_info *call_fun = pasty->fun;
4431 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4432 + call_fun->sec->size);
4433 if (call_fun->rodata)
4435 rotmp = (align_power (rotmp,
4436 call_fun->rodata->alignment_power)
4437 + call_fun->rodata->size);
4438 if (roalign < rosec->alignment_power)
4439 roalign = rosec->alignment_power;
4441 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4442 if (pasty->is_pasted)
4446 if (align_power (tmp, roalign) + rotmp > overlay_size)
4449 /* If we add this section, we might need new overlay call
4450 stubs. Add any overlay section calls to dummy_call. */
4452 sec_data = spu_elf_section_data (sec);
4453 sinfo = sec_data->u.i.stack_info;
4454 for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4455 for (call = sinfo->fun[k].call_list; call; call = call->next)
4456 if (call->is_pasted)
4458 BFD_ASSERT (pasty == NULL);
4461 else if (call->fun->sec->linker_mark)
4463 if (!copy_callee (&dummy_caller, call))
4466 while (pasty != NULL)
4468 struct function_info *call_fun = pasty->fun;
4470 for (call = call_fun->call_list; call; call = call->next)
4471 if (call->is_pasted)
4473 BFD_ASSERT (pasty == NULL);
4476 else if (!copy_callee (&dummy_caller, call))
4480 /* Calculate call stub size. */
4482 for (call = dummy_caller.call_list; call; call = call->next)
4484 unsigned int stub_delta = 1;
4486 if (htab->params->ovly_flavour == ovly_soft_icache)
4487 stub_delta = call->count;
4488 num_stubs += stub_delta;
4490 /* If the call is within this overlay, we won't need a
4492 for (k = base; k < i + 1; k++)
4493 if (call->fun->sec == ovly_sections[2 * k])
4495 num_stubs -= stub_delta;
4499 if (htab->params->ovly_flavour == ovly_soft_icache
4500 && num_stubs > htab->params->max_branch)
4502 if (align_power (tmp, roalign) + rotmp
4503 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4511 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4512 ovly_sections[2 * i]->owner,
4513 ovly_sections[2 * i],
4514 ovly_sections[2 * i + 1] ? " + rodata" : "");
4515 bfd_set_error (bfd_error_bad_value);
4519 while (dummy_caller.call_list != NULL)
4521 struct call_info *call = dummy_caller.call_list;
4522 dummy_caller.call_list = call->next;
4528 ovly_map[base++] = ovlynum;
4531 script = htab->params->spu_elf_open_overlay_script ();
4533 if (htab->params->ovly_flavour == ovly_soft_icache)
4535 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4538 if (fprintf (script,
4539 " . = ALIGN (%u);\n"
4540 " .ovl.init : { *(.ovl.init) }\n"
4541 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4542 htab->params->line_size) <= 0)
4547 while (base < count)
4549 unsigned int indx = ovlynum - 1;
4550 unsigned int vma, lma;
4552 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4553 lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4555 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4556 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4557 ovlynum, vma, lma) <= 0)
4560 base = print_one_overlay_section (script, base, count, ovlynum,
4561 ovly_map, ovly_sections, info);
4562 if (base == (unsigned) -1)
4565 if (fprintf (script, " }\n") <= 0)
4571 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4572 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4575 if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4580 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4583 if (fprintf (script,
4584 " . = ALIGN (16);\n"
4585 " .ovl.init : { *(.ovl.init) }\n"
4586 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4589 for (region = 1; region <= htab->params->num_lines; region++)
4593 while (base < count && ovly_map[base] < ovlynum)
4601 /* We need to set lma since we are overlaying .ovl.init. */
4602 if (fprintf (script,
4603 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4608 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4612 while (base < count)
4614 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4617 base = print_one_overlay_section (script, base, count, ovlynum,
4618 ovly_map, ovly_sections, info);
4619 if (base == (unsigned) -1)
4622 if (fprintf (script, " }\n") <= 0)
4625 ovlynum += htab->params->num_lines;
4626 while (base < count && ovly_map[base] < ovlynum)
4630 if (fprintf (script, " }\n") <= 0)
4634 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4639 free (ovly_sections);
4641 if (fclose (script) != 0)
4644 if (htab->params->auto_overlay & AUTO_RELINK)
4645 (*htab->params->spu_elf_relink) ();
4650 bfd_set_error (bfd_error_system_call);
4652 info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
/* Provide an estimate of total stack required.

   Runs the function-discovery and call-graph passes, then sums worst-case
   stack usage over all call-graph root nodes via for_each_node/sum_stack.
   When the user asked for stack analysis output, the per-function and
   overall results are printed through the linker callbacks.
   Returns FALSE on any analysis failure.  */

static bfd_boolean
spu_elf_stack_analysis (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  struct _sum_stack_param sum_stack_param;

  /* Both passes must succeed before any stack sums are meaningful.  */
  if (!discover_functions (info))
    return FALSE;

  if (!build_call_tree (info))
    return FALSE;

  htab = spu_hash_table (info);
  if (htab->params->stack_analysis)
    {
      info->callbacks->info (_("Stack size for call graph root nodes.\n"));
      info->callbacks->minfo (_("\nStack size for functions.  "
				"Annotations: '*' max stack, 't' tail call\n"));
    }

  /* emit_stack_syms controls whether sum_stack also defines __stack_*
     symbols; overall_stack accumulates the maximum across roots.  */
  sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
  sum_stack_param.overall_stack = 0;
  if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
    return FALSE;

  if (htab->params->stack_analysis)
    info->callbacks->info (_("Maximum stack required is 0x%v\n"),
			   (bfd_vma) sum_stack_param.overall_stack);
  return TRUE;
}
/* Perform a final link.

   SPU-specific wrapper around bfd_elf_final_link: optionally lays out
   automatic overlays, optionally runs stack/lrlive analysis, and builds
   the overlay call stubs before handing off to the generic ELF linker.  */

static bfd_boolean
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (htab->params->auto_overlay)
    spu_elf_auto_overlay (info);

  /* Stack analysis is needed either when explicitly requested, or for
     soft-icache lrlive analysis.  %X marks the link as failed but does
     not abort immediately.  */
  if ((htab->params->stack_analysis
       || (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->lrlive_analysis))
      && !spu_elf_stack_analysis (info))
    info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");

  /* %F makes a stub-building failure fatal.  */
  if (!spu_elf_build_stubs (info))
    info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");

  return bfd_elf_final_link (output_bfd, info);
}
/* Called when not normally emitting relocs, ie. !info->relocatable
   and !info->emitrelocations.  Returns a count of special relocs
   that need to be emitted.

   For SPU the "special" relocs are R_SPU_PPU32/R_SPU_PPU64, which are
   kept in the output for the PPU-side embedder to process.  */

static unsigned int
spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
{
  Elf_Internal_Rela *relocs;
  unsigned int count = 0;

  relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
				      info->keep_memory);
  if (relocs != NULL)
    {
      Elf_Internal_Rela *rel;
      Elf_Internal_Rela *relend = relocs + sec->reloc_count;

      for (rel = relocs; rel < relend; rel++)
	{
	  int r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    ++count;
	}

      /* Only free if _bfd_elf_link_read_relocs allocated a fresh copy
	 rather than returning the cached section relocs.  */
      if (elf_section_data (sec)->relocs != relocs)
	free (relocs);
    }

  return count;
}
/* Functions for adding fixup records to .fixup */

/* Each fixup record is one 32-bit word: the quadword-aligned address in
   the upper 28 bits, plus a 4-bit mask (one bit per word within the
   quadword) of the words carrying an R_SPU_ADDR32 relocation.  */
#define FIXUP_RECORD_SIZE 4

/* Store/load the fixup record at INDEX in htab->sfixup.  */
#define FIXUP_PUT(output_bfd,htab,index,addr) \
	  bfd_put_32 (output_bfd, addr, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
#define FIXUP_GET(output_bfd,htab,index) \
	  bfd_get_32 (output_bfd, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
/* Store OFFSET in .fixup.  This assumes it will be called with an
   increasing OFFSET.  When this OFFSET fits with the last base offset,
   it just sets a bit, otherwise it adds a new fixup record.  */

static void
spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
		    bfd_vma offset)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sfixup = htab->sfixup;
  /* Quadword base address of OFFSET, and the bit for the word within
     that quadword (bit 3 = word 0 ... bit 0 = word 3).  */
  bfd_vma qaddr = offset & ~(bfd_vma) 15;
  bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
  if (sfixup->reloc_count == 0)
    {
      /* First record.  reloc_count doubles as the record count.  */
      FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
      sfixup->reloc_count++;
    }
  else
    {
      bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
      if (qaddr != (base & ~(bfd_vma) 15))
	{
	  /* New quadword; the section was sized by spu_elf_size_sections
	     with room for a trailing NULL sentinel, so overflowing it
	     indicates an internal inconsistency.  */
	  if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
	    (*_bfd_error_handler) (_("fatal error while creating .fixup"));
	  FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
	  sfixup->reloc_count++;
	}
      else
	/* Same quadword as the last record: just OR in the word bit.  */
	FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
    }
}
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.

   Standard elf_backend_relocate_section hook with SPU twists:
   - calls through to overlay stubs when the target lives in an overlay;
   - for soft-icache, encodes the overlay set id into high address bits;
   - emits .fixup records for R_SPU_ADDR32 when requested;
   - preserves R_SPU_PPU32/R_SPU_PPU64 relocs for the PPU-side embedder,
     rewriting ._ea-relative ones to be image-relative.
   Returns TRUE/FALSE as usual, or 2 when PPU relocs were kept (a signal
   to the generic linker that relocs must still be output).  */

static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean is_ea_sym;
  bfd_boolean stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  /* Only bother testing individual relocs for stubs when this section
     could plausibly need them at all.  */
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section));
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol: resolve directly from the symbol table.  */
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  if (sym_hashes == NULL)
	    return FALSE;

	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];

	  /* Follow indirect/warning links to the real symbol.  */
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  relocation = 0;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      sec = h->root.u.def.section;
	      if (sec == NULL
		  || sec->output_section == NULL)
		/* Set a flag that will be cleared later if we find a
		   relocation value for this symbol.  output_section
		   is typically NULL for symbols satisfied by a shared
		   library.  */
		unresolved_reloc = TRUE;
	      else
		relocation = (h->root.u.def.value
			      + sec->output_section->vma
			      + sec->output_offset);
	    }
	  else if (h->root.type == bfd_link_hash_undefweak)
	    ;
	  else if (info->unresolved_syms_in_objects == RM_IGNORE
		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
	    ;
	  else if (!info->relocatable
		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
	    {
	      bfd_boolean err;
	      err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
		     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
	      if (!info->callbacks->undefined_symbol (info,
						      h->root.root.string,
						      input_bfd,
						      input_section,
						      rel->r_offset, err))
		return FALSE;
	    }
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (info->relocatable)
	continue;

      /* Change "a rt,ra,rb" to "ai rt,ra,0".  */
      if (r_type == R_SPU_ADD_PIC
	  && h != NULL
	  && !(h->def_regular || ELF_COMMON_DEF_P (h)))
	{
	  bfd_byte *loc = contents + rel->r_offset;
	  loc[0] = 0x1c;
	  loc[1] = 0x00;
	  loc[2] &= 0x3f;
	}

      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && !is_ea_sym
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = iovl;

	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  /* Find the got/stub entry made earlier for this call site.
	     Soft-icache matches on branch address; the classic flavour
	     matches on addend and overlay index.  */
	  for (g = *head; g != NULL; g = g->next)
	    if (htab->params->ovly_flavour == ovly_soft_icache
		? (g->ovl == ovl
		   && g->br_addr == (rel->r_offset
				     + input_section->output_offset
				     + input_section->output_section->vma))
		: g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    abort ();

	  /* Redirect the relocation at the stub instead of the target.  */
	  relocation = g->stub_addr;
	  addend = 0;
	}
      else
	{
	  /* For soft icache, encode the overlay index into addresses.  */
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && (r_type == R_SPU_ADDR16_HI
		  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
	      && !is_ea_sym)
	    {
	      unsigned int ovl = overlay_index (sec);
	      if (ovl != 0)
		{
		  /* Cache-set id lives above the 18-bit local-store
		     address space.  */
		  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
		  relocation += set_id << 18;
		}
	    }
	}

      if (htab->params->emit_fixups && !info->relocatable
	  && (input_section->flags & SEC_ALLOC) != 0
	  && r_type == R_SPU_ADDR32)
	{
	  bfd_vma offset;
	  offset = rel->r_offset + input_section->output_section->vma
	    + input_section->output_offset;
	  spu_elf_emit_fixup (output_bfd, info, offset);
	}

      if (unresolved_reloc)
	;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  emit_these_relocs = TRUE;
	  continue;
	}
      else if (is_ea_sym)
	unresolved_reloc = TRUE;

      if (unresolved_reloc
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = FALSE;
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      /* Compact the reloc array down to just the PPU relocs that must
	 survive into the output file.  */
      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
/* Required elf_backend hook; SPU has no dynamic sections to finish,
   so this is a no-op that reports success.  */

static bfd_boolean
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  return TRUE;
}
/* Adjust _SPUEAR_ syms to point at their overlay stubs.

   _SPUEAR_-prefixed symbols are entry points called from the PPU side;
   if such a symbol lives in an overlay it must be published as the
   address of its (always-resident) stub rather than the overlaid code.  */

static int
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;

      /* Look for the non-overlay stub entry for this symbol: for
	 soft-icache that is the entry whose branch address equals its
	 stub address; otherwise the addend==0/ovl==0 entry.  */
      for (g = h->got.glist; g != NULL; g = g->next)
	if (htab->params->ovly_flavour == ovly_soft_icache
	    ? g->br_addr == g->stub_addr
	    : g->addend == 0 && g->ovl == 0)
	  {
	    /* Redirect the output symbol to the stub section/address.  */
	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
			     (htab->stub_sec[0]->output_section->owner,
			      htab->stub_sec[0]->output_section));
	    sym->st_value = g->stub_addr;
	    break;
	  }
    }

  return 1;
}
/* Non-zero when the SPU image is being built for embedding in a PPU
   "plugin" ELF; see spu_elf_post_process_headers.  */
static int spu_plugin = 0;

/* Record the plugin mode requested by the linker front end.  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
/* Set ELF header e_type for plugins.  */

static void
spu_elf_post_process_headers (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      /* Plugins are marked ET_DYN rather than ET_EXEC.  */
      i_ehdrp->e_type = ET_DYN;
    }
}
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  asection *sec;
  int extra = 0;

  if (info != NULL)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      /* One segment per overlay region/buffer.  */
      extra = htab->num_overlays;
    }

  /* Splitting overlays out of an existing PT_LOAD leaves one more
     segment for the remainder.  */
  if (extra)
    ++extra;

  /* A loadable .toe gets its own segment too (see
     spu_elf_modify_segment_map).  */
  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Split the segment three ways: sections after S, S itself,
	       and (implicitly, by shrinking M) sections before S.  */
	    if (i + 1 < m->count)
	      {
		/* New PT_LOAD for the sections following S.  The map
		   struct already holds one section pointer, hence the
		   i + 2 in the size computation.  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		/* M keeps the sections before S; make a fresh
		   single-section PT_LOAD for S itself.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }

  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
	  && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	{
	  /* Unlink from the main list and append to the overlay list.  */
	  m = *p;
	  *p = m->next;
	  *p_overlay = m;
	  p_overlay = &m->next;
	  continue;
	}

      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  *p_overlay = elf_seg_map (abfd);
  elf_seg_map (abfd) = m_overlay;

  return TRUE;
}
/* Tweak the section type of .note.spu_name.  */

static bfd_boolean
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
		       Elf_Internal_Shdr *hdr,
		       asection *sec)
{
  /* Force SHT_NOTE so the spu_name note is recognised as a note
     section regardless of its flags.  */
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return TRUE;
}
/* Tweak phdrs before writing them out.

   Marks overlay segments with PF_OVERLAY, records their file offsets in
   _ovly_table (or .ovl.init for soft-icache), and pads PT_LOAD sizes up
   to 16-byte multiples for DMA, provided that cannot create overlapping
   segments.  */

static bfd_boolean
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Walk the segment map in step with the phdr array; the two are
	 in the same order at this point.  */
      for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0
		&& htab->params->ovly_flavour != ovly_soft_icache)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Each _ovly_table entry is 16 bytes; file_off lives at
		   offset 8 within the entry for overlay O (1-based).  */
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
      /* Soft-icache has its file offset put in .ovl.init.  */
      if (htab->init != NULL && htab->init->size != 0)
	{
	  bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	  bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	}
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* Check against the previously-seen (higher-addressed) segment
	   before committing to any rounding.  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Only apply the rounding if the scan above ran to completion,
     i.e. no overlap would result.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
/* Size the .fixup section.

   Counts one fixup record per quadword that contains at least one
   R_SPU_ADDR32 relocation (records are packed four-words-per-record,
   see FIXUP_RECORD_SIZE), then allocates the section contents with room
   for a trailing NULL sentinel record.  Relies on relocs being sorted
   by increasing r_offset within each section.  */

static bfd_boolean
spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  if (htab->params->emit_fixups)
    {
      asection *sfixup = htab->sfixup;
      int fixup_count = 0;
      bfd *ibfd;
      size_t size;

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
	{
	  asection *isec;

	  if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
	      bfd_vma base_end;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((isec->flags & SEC_ALLOC) == 0
		  || (isec->flags & SEC_RELOC) == 0
		  || isec->reloc_count == 0)
		continue;

	      /* Get the relocs.  */
	      internal_relocs =
		_bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
					   info->keep_memory);
	      if (internal_relocs == NULL)
		return FALSE;

	      /* 1 quadword can contain up to 4 R_SPU_ADDR32
		 relocations.  They are stored in a single word by
		 saving the upper 28 bits of the address and setting the
		 lower 4 bits to a bit mask of the words that have the
		 relocation.  BASE_END keeps track of the next quadword. */
	      irela = internal_relocs;
	      irelaend = irela + isec->reloc_count;
	      base_end = 0;
	      for (; irela < irelaend; irela++)
		if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
		    && irela->r_offset >= base_end)
		  {
		    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
		    fixup_count++;
		  }
	    }
	}

      /* We always have a NULL fixup as a sentinel */
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
      if (!bfd_set_section_size (output_bfd, sfixup, size))
	return FALSE;
      /* Allocated on the first input bfd so it is freed with the link;
	 zeroed so the sentinel record is already in place.  */
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
      if (sfixup->contents == NULL)
	return FALSE;
    }
  return TRUE;
}
/* Target vector definition and elf_backend hook overrides for the
   big-endian 32-bit SPU ELF target; elf32-target.h expands these into
   the bfd_elf32_spu_vec target.  */

#define TARGET_BIG_SYM bfd_elf32_spu_vec
#define TARGET_BIG_NAME "elf32-spu"
#define ELF_ARCH bfd_arch_spu
#define ELF_TARGET_ID SPU_ELF_DATA
#define ELF_MACHINE_CODE EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE 0x80
#define elf_backend_rela_normal 1
#define elf_backend_can_gc_sections 1

#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto spu_elf_info_to_howto
#define elf_backend_count_relocs spu_elf_count_relocs
#define elf_backend_relocate_section spu_elf_relocate_section
#define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define elf_backend_object_p spu_elf_object_p
#define bfd_elf32_new_section_hook spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers spu_elf_additional_program_headers
#define elf_backend_modify_segment_map spu_elf_modify_segment_map
#define elf_backend_modify_program_headers spu_elf_modify_program_headers
#define elf_backend_post_process_headers spu_elf_post_process_headers
#define elf_backend_fake_sections spu_elf_fake_sections
#define elf_backend_special_sections spu_elf_special_sections
#define bfd_elf32_bfd_final_link spu_elf_final_link

#include "elf32-target.h"