1 /* SPU specific support for 32-bit ELF
3 Copyright (C) 2006-2018 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table[] = {
40 HOWTO (R_SPU_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
41 bfd_elf_generic_reloc, "SPU_NONE",
42 FALSE, 0, 0x00000000, FALSE),
43 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
44 bfd_elf_generic_reloc, "SPU_ADDR10",
45 FALSE, 0, 0x00ffc000, FALSE),
46 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
47 bfd_elf_generic_reloc, "SPU_ADDR16",
48 FALSE, 0, 0x007fff80, FALSE),
49 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
50 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
51 FALSE, 0, 0x007fff80, FALSE),
52 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
53 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
54 FALSE, 0, 0x007fff80, FALSE),
55 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
56 bfd_elf_generic_reloc, "SPU_ADDR18",
57 FALSE, 0, 0x01ffff80, FALSE),
58 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "SPU_ADDR32",
60 FALSE, 0, 0xffffffff, FALSE),
61 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "SPU_REL16",
63 FALSE, 0, 0x007fff80, TRUE),
64 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
65 bfd_elf_generic_reloc, "SPU_ADDR7",
66 FALSE, 0, 0x001fc000, FALSE),
67 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
68 spu_elf_rel9, "SPU_REL9",
69 FALSE, 0, 0x0180007f, TRUE),
70 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
71 spu_elf_rel9, "SPU_REL9I",
72 FALSE, 0, 0x0000c07f, TRUE),
73 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
74 bfd_elf_generic_reloc, "SPU_ADDR10I",
75 FALSE, 0, 0x00ffc000, FALSE),
76 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
77 bfd_elf_generic_reloc, "SPU_ADDR16I",
78 FALSE, 0, 0x007fff80, FALSE),
79 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
80 bfd_elf_generic_reloc, "SPU_REL32",
81 FALSE, 0, 0xffffffff, TRUE),
82 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "SPU_ADDR16X",
84 FALSE, 0, 0x007fff80, FALSE),
85 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
86 bfd_elf_generic_reloc, "SPU_PPU32",
87 FALSE, 0, 0xffffffff, FALSE),
88 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
89 bfd_elf_generic_reloc, "SPU_PPU64",
91 HOWTO (R_SPU_ADD_PIC, 0, 0, 0, FALSE, 0, complain_overflow_dont,
92 bfd_elf_generic_reloc, "SPU_ADD_PIC",
93 FALSE, 0, 0x00000000, FALSE),
96 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
97 { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
98 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
108 return (enum elf_spu_reloc_type) -1;
111 case BFD_RELOC_SPU_IMM10W:
113 case BFD_RELOC_SPU_IMM16W:
115 case BFD_RELOC_SPU_LO16:
116 return R_SPU_ADDR16_LO;
117 case BFD_RELOC_SPU_HI16:
118 return R_SPU_ADDR16_HI;
119 case BFD_RELOC_SPU_IMM18:
121 case BFD_RELOC_SPU_PCREL16:
123 case BFD_RELOC_SPU_IMM7:
125 case BFD_RELOC_SPU_IMM8:
127 case BFD_RELOC_SPU_PCREL9a:
129 case BFD_RELOC_SPU_PCREL9b:
131 case BFD_RELOC_SPU_IMM10:
132 return R_SPU_ADDR10I;
133 case BFD_RELOC_SPU_IMM16:
134 return R_SPU_ADDR16I;
137 case BFD_RELOC_32_PCREL:
139 case BFD_RELOC_SPU_PPU32:
141 case BFD_RELOC_SPU_PPU64:
143 case BFD_RELOC_SPU_ADD_PIC:
144 return R_SPU_ADD_PIC;
149 spu_elf_info_to_howto (bfd *abfd,
151 Elf_Internal_Rela *dst)
153 enum elf_spu_reloc_type r_type;
155 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
156 /* PR 17512: file: 90c2a92e. */
157 if (r_type >= R_SPU_max)
159 /* xgettext:c-format */
160 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
162 bfd_set_error (bfd_error_bad_value);
165 cache_ptr->howto = &elf_howto_table[(int) r_type];
169 static reloc_howto_type *
170 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
171 bfd_reloc_code_real_type code)
173 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
175 if (r_type == (enum elf_spu_reloc_type) -1)
178 return elf_howto_table + r_type;
181 static reloc_howto_type *
182 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
187 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
188 if (elf_howto_table[i].name != NULL
189 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
190 return &elf_howto_table[i];
195 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
197 static bfd_reloc_status_type
198 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
199 void *data, asection *input_section,
200 bfd *output_bfd, char **error_message)
202 bfd_size_type octets;
206 /* If this is a relocatable link (output_bfd test tells us), just
207 call the generic function. Any adjustment will be done at final
209 if (output_bfd != NULL)
210 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
211 input_section, output_bfd, error_message);
213 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
214 return bfd_reloc_outofrange;
215 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
217 /* Get symbol value. */
219 if (!bfd_is_com_section (symbol->section))
221 if (symbol->section->output_section)
222 val += symbol->section->output_section->vma;
224 val += reloc_entry->addend;
226 /* Make it pc-relative. */
227 val -= input_section->output_section->vma + input_section->output_offset;
230 if (val + 256 >= 512)
231 return bfd_reloc_overflow;
233 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
235 /* Move two high bits of value to REL9I and REL9 position.
236 The mask will take care of selecting the right field. */
237 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
238 insn &= ~reloc_entry->howto->dst_mask;
239 insn |= val & reloc_entry->howto->dst_mask;
240 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
245 spu_elf_new_section_hook (bfd *abfd, asection *sec)
247 if (!sec->used_by_bfd)
249 struct _spu_elf_section_data *sdata;
251 sdata = bfd_zalloc (abfd, sizeof (*sdata));
254 sec->used_by_bfd = sdata;
257 return _bfd_elf_new_section_hook (abfd, sec);
260 /* Set up overlay info for executables. */
263 spu_elf_object_p (bfd *abfd)
265 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
267 unsigned int i, num_ovl, num_buf;
268 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
269 Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
270 Elf_Internal_Phdr *last_phdr = NULL;
272 for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
273 if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
278 if (last_phdr == NULL
279 || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
282 for (j = 1; j < elf_numsections (abfd); j++)
284 Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
286 if (ELF_SECTION_SIZE (shdr, phdr) != 0
287 && ELF_SECTION_IN_SEGMENT (shdr, phdr))
289 asection *sec = shdr->bfd_section;
290 spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
291 spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
299 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
300 strip --strip-unneeded will not remove them. */
303 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
305 if (sym->name != NULL
306 && sym->section != bfd_abs_section_ptr
307 && strncmp (sym->name, "_EAR_", 5) == 0)
308 sym->flags |= BSF_KEEP;
311 /* SPU ELF linker hash table. */
/* NOTE(review): this excerpt is missing several lines of this struct
   (section-pointer shortcuts, the member for the fixup section, the
   struct's closing brace, and the opening of the following
   struct got_entry).  Members below are reproduced exactly as seen.  */
struct spu_link_hash_table
  /* Generic ELF hash table; must be first so the generic code can
     cast between the two.  */
  struct elf_link_hash_table elf;

  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */

  /* Overlay manager entry symbols (load/return or, for soft icache,
     branch/call handlers — see entry_names in spu_elf_find_overlays).  */
  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section */
  /* NOTE(review): the member the comment above refers to is missing
     from the excerpt.  */
  unsigned int stub_err : 1;

/* Hijack the generic got fields for overlay stub accounting.  */
  /* NOTE(review): this field belongs to struct got_entry, whose opening
     is missing from the excerpt.  */
  struct got_entry *next;
/* Return the SPU-specific hash table for link info P, or NULL when the
   hash table attached to P is not SPU_ELF_DATA (i.e. not ours).  */
#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
   == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
/* NOTE(review): these members belong to struct call_info (an edge in
   the call graph); the struct's opening and some members are missing
   from this excerpt.  */
  /* Called function.  */
  struct function_info *fun;
  /* Next entry in the caller's call list.  */
  struct call_info *next;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
  /* NOTE(review): these members belong to struct function_info; the
     struct's opening and several members (section pointer, address
     range, lr_store/sp_adjust offsets, depth) are missing from this
     excerpt although their comments remain below.  */
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  /* Function section.  */
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  /* Offset where we found a store of lr, or -1 if none found.  */
  /* Offset where we found the stack adjustment insn.  */
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section.  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
434 struct spu_elf_stack_info
438 /* Variable size array describing functions, one per contiguous
439 address range belonging to a function. */
440 struct function_info fun[1];
443 static struct function_info *find_function (asection *, bfd_vma,
444 struct bfd_link_info *);
446 /* Create a spu ELF linker hash table. */
448 static struct bfd_link_hash_table *
449 spu_elf_link_hash_table_create (bfd *abfd)
451 struct spu_link_hash_table *htab;
453 htab = bfd_zmalloc (sizeof (*htab));
457 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
458 _bfd_elf_link_hash_newfunc,
459 sizeof (struct elf_link_hash_entry),
466 htab->elf.init_got_refcount.refcount = 0;
467 htab->elf.init_got_refcount.glist = NULL;
468 htab->elf.init_got_offset.offset = 0;
469 htab->elf.init_got_offset.glist = NULL;
470 return &htab->elf.root;
474 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
476 bfd_vma max_branch_log2;
478 struct spu_link_hash_table *htab = spu_hash_table (info);
479 htab->params = params;
480 htab->line_size_log2 = bfd_log2 (htab->params->line_size);
481 htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
483 /* For the software i-cache, we provide a "from" list whose size
484 is a power-of-two number of quadwords, big enough to hold one
485 byte per outgoing branch. Compute this number here. */
486 max_branch_log2 = bfd_log2 (htab->params->max_branch);
487 htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
490 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
491 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
492 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
495 get_sym_h (struct elf_link_hash_entry **hp,
496 Elf_Internal_Sym **symp,
498 Elf_Internal_Sym **locsymsp,
499 unsigned long r_symndx,
502 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
504 if (r_symndx >= symtab_hdr->sh_info)
506 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
507 struct elf_link_hash_entry *h;
509 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
510 while (h->root.type == bfd_link_hash_indirect
511 || h->root.type == bfd_link_hash_warning)
512 h = (struct elf_link_hash_entry *) h->root.u.i.link;
522 asection *symsec = NULL;
523 if (h->root.type == bfd_link_hash_defined
524 || h->root.type == bfd_link_hash_defweak)
525 symsec = h->root.u.def.section;
531 Elf_Internal_Sym *sym;
532 Elf_Internal_Sym *locsyms = *locsymsp;
536 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
538 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
540 0, NULL, NULL, NULL);
545 sym = locsyms + r_symndx;
554 *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
560 /* Create the note section if not already present. This is done early so
561 that the linker maps the sections to the right place in the output. */
564 spu_elf_create_sections (struct bfd_link_info *info)
566 struct spu_link_hash_table *htab = spu_hash_table (info);
569 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
570 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
575 /* Make SPU_PTNOTE_SPUNAME section. */
582 ibfd = info->input_bfds;
583 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
584 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
586 || !bfd_set_section_alignment (ibfd, s, 4))
589 name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
590 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
591 size += (name_len + 3) & -4;
593 if (!bfd_set_section_size (ibfd, s, size))
596 data = bfd_zalloc (ibfd, size);
600 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
601 bfd_put_32 (ibfd, name_len, data + 4);
602 bfd_put_32 (ibfd, 1, data + 8);
603 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
604 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
605 bfd_get_filename (info->output_bfd), name_len);
609 if (htab->params->emit_fixups)
614 if (htab->elf.dynobj == NULL)
615 htab->elf.dynobj = ibfd;
616 ibfd = htab->elf.dynobj;
617 flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
618 | SEC_IN_MEMORY | SEC_LINKER_CREATED);
619 s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
620 if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
628 /* qsort predicate to sort sections by vma. */
631 sort_sections (const void *a, const void *b)
633 const asection *const *s1 = a;
634 const asection *const *s2 = b;
635 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
638 return delta < 0 ? -1 : 1;
640 return (*s1)->index - (*s2)->index;
643 /* Identify overlays in the output bfd, and number them.
644 Returns 0 on error, 1 if no overlays, 2 if overlays. */
/* NOTE(review): this excerpt is missing many lines of this function
   (return type, braces, several declarations such as the section
   iterator and ovl_end, and various statements); the visible code is
   reproduced as-is.  It numbers overlay sections and buffers in the
   output bfd, handling both the soft-icache and classic overlay
   layouts, then looks up the overlay manager entry symbols.  */
spu_elf_find_overlays (struct bfd_link_info *info)
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  /* Entry symbol names indexed by [i][ovly_flavour].  */
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  if (info->output_bfd->section_count < 2)
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
      unsigned int prev_buf = 0, set_id = 0;
      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;
      for (i = 1; i < n; i++)
	  if (s->vma < ovl_end)
	      asection *s0 = alloc_sec[i - 1];
				 << (htab->num_lines_log2 + htab->line_size_log2)));
	    ovl_end = s->vma + s->size;
      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	  if (s->vma >= ovl_end)
	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      set_id = (num_buf == prev_buf)? set_id + 1 : 0;
	      if ((s->vma - vma_start) & (htab->params->line_size - 1))
		  info->callbacks->einfo (_("%X%P: overlay section %pA "
					    "does not start on a cache line\n"),
		  bfd_set_error (bfd_error_bad_value);
	      else if (s->size > htab->params->line_size)
		  info->callbacks->einfo (_("%X%P: overlay section %pA "
					    "is larger than a cache line\n"),
		  bfd_set_error (bfd_error_bad_value);
	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= (set_id << htab->num_lines_log2) + num_buf;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
      /* Ensure there are no more overlay sections.  */
	  if (s->vma < ovl_end)
	      info->callbacks->einfo (_("%X%P: overlay section %pA "
					"is not in cache area\n"),
	      bfd_set_error (bfd_error_bad_value);
	    ovl_end = s->vma + s->size;
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	  if (s->vma < ovl_end)
	      asection *s0 = alloc_sec[i - 1];
	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		  if (strncmp (s0->name, ".ovl.init", 9) != 0)
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    ovl_end = s->vma + s->size;
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  if (s0->vma != s->vma)
		      /* xgettext:c-format */
		      info->callbacks->einfo (_("%X%P: overlay sections %pA "
						"and %pA do not start at the "
		      bfd_set_error (bfd_error_bad_value);
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
	    ovl_end = s->vma + s->size;
  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;
  for (i = 0; i < 2; i++)
      struct elf_link_hash_entry *h;
      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h->root.type == bfd_link_hash_new)
	  h->root.type = bfd_link_hash_undefined;
	  h->ref_regular_nonweak = 1;
      htab->ovly_entry[i] = h;
/* Non-zero to use bra in overlay stubs rather than br.  */
/* NOTE(review): the BRA_STUBS definition this comment refers to is
   missing from the excerpt.  The constants below are SPU instruction
   opcode templates used when emitting overlay stubs (see build_stub).  */
#define BRA 0x30000000
#define BRASL 0x31000000
#define BR 0x32000000
#define BRSL 0x33000000
#define NOP 0x40200000
#define LNOP 0x00200000
#define ILA 0x42000000
853 /* Return true for all relative and absolute branch instructions.
861 brhnz 00100011 0.. */
864 is_branch (const unsigned char *insn)
866 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
869 /* Return true for all indirect branch instructions.
877 bihnz 00100101 011 */
880 is_indirect_branch (const unsigned char *insn)
882 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
885 /* Return true for branch hint instructions.
890 is_hint (const unsigned char *insn)
892 return (insn[0] & 0xfc) == 0x10;
895 /* True if INPUT_SECTION might need overlay stubs. */
898 maybe_needs_stubs (asection *input_section)
900 /* No stubs for debug sections and suchlike. */
901 if ((input_section->flags & SEC_ALLOC) == 0)
904 /* No stubs for link-once sections that will be discarded. */
905 if (input_section->output_section == bfd_abs_section_ptr)
908 /* Don't create stubs for .eh_frame references. */
909 if (strcmp (input_section->name, ".eh_frame") == 0)
931 /* Return non-zero if this reloc symbol should go via an overlay stub.
932 Return 2 if the stub must be in non-overlay area. */
/* NOTE(review): this excerpt is missing lines of this function —
   the sym_sec and contents parameters, braces, several conditions and
   all return statements; visible code reproduced as-is.  It decides
   whether the reloc IRELA in INPUT_SECTION referring to H/SYM needs an
   overlay stub, and which kind (see enum _stub_type, not visible).  */
static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *input_section,
		Elf_Internal_Rela *irela,
		struct bfd_link_info *info)
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
    sym_type = ELF_ST_TYPE (sym->st_info);
  r_type = ELF32_R_TYPE (irela->r_info);
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
      if (contents == NULL)
	  if (!bfd_get_section_contents (input_section->owner,
	  contents += irela->r_offset;
      branch = is_branch (contents);
      hint = is_hint (contents);
	  /* brsl/brasl: a function call.  */
	  call = (contents[0] & 0xfd) == 0x31;
	      && sym_type != STT_FUNC
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;
		sym_name = h->root.root.string;
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
		/* xgettext:c-format */
		(_("warning: call to non-function symbol %s defined in %pB"),
		 sym_name, sym_sec->owner);
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
      unsigned int lrlive = 0;
	lrlive = (contents[1] & 0x70) >> 4;
      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
	ret = br000_ovl_stub + lrlive;
  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
/* NOTE(review): this excerpt is missing lines of this function —
   return type, the ibfd/isec parameters, braces, the addend
   declaration and several statements; visible code reproduced as-is.
   It accounts for one overlay stub (per function, per overlay) in
   htab->stub_count, reusing the generic got-entry lists.  */
count_stub (struct spu_link_hash_table *htab,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
    head = &h->got.glist;
      if (elf_local_got_ents (ibfd) == NULL)
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
  if (htab->params->ovly_flavour == ovly_soft_icache)
      htab->stub_count[ovl] += 1;
    addend = irela->r_addend;
      struct got_entry *gnext;
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)
	  /* Need a new non-overlay area stub.  Zap other stubs.  */
	  for (g = *head; g != NULL; g = gnext)
	      if (g->addend == addend)
		  htab->stub_count[g->ovl] -= 1;
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
      g = bfd_malloc (sizeof *g);
      g->stub_addr = (bfd_vma) -1;
      htab->stub_count[ovl] += 1;
1157 /* Support two sizes of overlay stubs, a slower more compact stub of two
1158 instructions, and a faster stub of four instructions.
1159 Soft-icache stubs are four or eight words. */
1162 ovl_stub_size (struct spu_elf_params *params)
1164 return 16 << params->ovly_flavour >> params->compact_stub;
1168 ovl_stub_size_log2 (struct spu_elf_params *params)
1170 return 4 + params->ovly_flavour - params->compact_stub;
1173 /* Two instruction overlay stubs look like:
1175 brsl $75,__ovly_load
1176 .word target_ovl_and_address
1178 ovl_and_address is a word with the overlay number in the top 14 bits
1179 and local store address in the bottom 18 bits.
1181 Four instruction overlay stubs look like:
1185 ila $79,target_address
1188 Software icache stubs are:
1192 .word lrlive_branchlocalstoreaddr;
1193 brasl $75,__icache_br_handler
/* NOTE(review): this excerpt is missing many lines of this function —
   return type, the ibfd/isec/dest/dest_sec parameters, braces, the
   sec/off/add/len/name declarations and numerous statements; visible
   code reproduced as-is.  It emits one overlay stub (normal, compact,
   or soft-icache flavour) into the stub section, computing lrlive info
   for soft-icache stubs and optionally emitting a stub symbol.  */
build_stub (struct bfd_link_info *info,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
    head = &h->got.glist;
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    addend = irela->r_addend;
  if (htab->params->ovly_flavour == ovly_soft_icache)
      g = bfd_malloc (sizeof *g);
      g->br_addr = (irela->r_offset
		    + isec->output_offset
		    + isec->output_section->vma);
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
      if (g->ovl == 0 && ovl != 0)
      if (g->stub_addr != (bfd_vma) -1)
  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);
  if (((dest | to | from) & 3) != 0)
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
      /* Four-instruction stub: ila $78,ovl; lnop; ila $79,dest; br(a).  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
      /* Two-instruction stub: brsl/brasl $75; .word ovl|dest.  */
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
      if (stub_type == nonovl_stub)
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
      else if (irela != NULL)
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	      struct function_info *found = NULL;
	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
	      while (caller->start != NULL)
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
	  if (off > caller->sp_adjust)
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
	  else if (off > caller->lr_store)
	      /* Between lr save and stack adjust.  */
	      /* This should never happen since prologues won't
	    /* On entry to function.  */
	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    /* xgettext:c-format */
	    info->callbacks->einfo (_("%pA:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);
      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      br_dest = g->stub_addr;
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);
	/* Extra space for linked list entries.  */
  sec->size += ovl_stub_size (htab->params);
  if (htab->params->emit_stub_syms)
      len = 8 + sizeof (".ovl_call.") - 1;
	len += strlen (h->root.root.string);
	add = (int) irela->r_addend & 0xffffffff;
      name = bfd_malloc (len + 1);
      sprintf (name, "%08x.ovl_call.", g->ovl);
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
	sprintf (name + len - 9, "+%x", add);
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      if (h->root.type == bfd_link_hash_new)
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
1475 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
/* NOTE(review): this listing is elided (line numbers jump); braces and the
   return-FALSE path are not visible.  Counts (does not build) one
   non-overlay stub per qualifying _SPUEAR_ symbol.  Returns the result of
   count_stub when a stub is needed; presumably TRUE otherwise — confirm
   against the full source.  */
1479 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1481 /* Symbols starting with _SPUEAR_ need a stub because they may be
1482 invoked by the PPU. */
1483 struct bfd_link_info *info = inf;
1484 struct spu_link_hash_table *htab = spu_hash_table (info);
/* Stub required only for defined (or weakly defined) _SPUEAR_ symbols whose
   section is a real output section that is either inside an overlay
   (ovl_index != 0) or when non-overlay stubs were requested.  */
1487 if ((h->root.type == bfd_link_hash_defined
1488 || h->root.type == bfd_link_hash_defweak)
1490 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1491 && (sym_sec = h->root.u.def.section) != NULL
1492 && sym_sec->output_section != bfd_abs_section_ptr
1493 && spu_elf_section_data (sym_sec->output_section) != NULL
1494 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1495 || htab->params->non_overlay_stubs))
1497 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
/* Mirror of allocate_spuear_stubs, run on the second (build) pass: emits the
   actual stub code for each _SPUEAR_ symbol.  The guard condition below is
   intentionally identical to the counting pass — the two must stay in sync
   or stub sizing will not match stub building.
   NOTE(review): listing is elided; surrounding braces/returns not visible.  */
1504 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1506 /* Symbols starting with _SPUEAR_ need a stub because they may be
1507 invoked by the PPU. */
1508 struct bfd_link_info *info = inf;
1509 struct spu_link_hash_table *htab = spu_hash_table (info);
1512 if ((h->root.type == bfd_link_hash_defined
1513 || h->root.type == bfd_link_hash_defweak)
1515 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1516 && (sym_sec = h->root.u.def.section) != NULL
1517 && sym_sec->output_section != bfd_abs_section_ptr
1518 && spu_elf_section_data (sym_sec->output_section) != NULL
1519 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1520 || htab->params->non_overlay_stubs))
/* Build the stub at the symbol's defined value in its section.  */
1522 return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1523 h->root.u.def.value, sym_sec);
1529 /* Size or build stubs. */
/* Walk every SPU input bfd, every section, every reloc, deciding which
   relocs need an overlay call stub.  When BUILD is FALSE the stubs are only
   counted (sizing pass); when TRUE they are emitted.
   NOTE(review): elided listing — `continue' statements after the early
   guards, the bfd/asection declarations, and several closing braces are not
   visible here.  */
1532 process_stubs (struct bfd_link_info *info, bfd_boolean build)
1534 struct spu_link_hash_table *htab = spu_hash_table (info);
1537 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
1539 extern const bfd_target spu_elf32_vec;
1540 Elf_Internal_Shdr *symtab_hdr;
1542 Elf_Internal_Sym *local_syms = NULL;
/* Skip non-SPU input files (e.g. linker scripts, foreign objects).  */
1544 if (ibfd->xvec != &spu_elf32_vec)
1547 /* We'll need the symbol table in a second. */
1548 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1549 if (symtab_hdr->sh_info == 0)
1552 /* Walk over each section attached to the input bfd. */
1553 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
1555 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1557 /* If there aren't any relocs, then there's nothing more to do. */
1558 if ((isec->flags & SEC_RELOC) == 0
1559 || isec->reloc_count == 0)
1562 if (!maybe_needs_stubs (isec))
1565 /* Get the relocs. */
1566 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1568 if (internal_relocs == NULL)
1569 goto error_ret_free_local;
1571 /* Now examine each relocation. */
1572 irela = internal_relocs;
1573 irelaend = irela + isec->reloc_count;
1574 for (; irela < irelaend; irela++)
1576 enum elf_spu_reloc_type r_type;
1577 unsigned int r_indx;
1579 Elf_Internal_Sym *sym;
1580 struct elf_link_hash_entry *h;
1581 enum _stub_type stub_type;
1583 r_type = ELF32_R_TYPE (irela->r_info);
1584 r_indx = ELF32_R_SYM (irela->r_info);
/* Out-of-range reloc type: flag the input as bad and unwind via the
   shared cleanup labels below (free relocs, then local syms).  */
1586 if (r_type >= R_SPU_max)
1588 bfd_set_error (bfd_error_bad_value);
1589 error_ret_free_internal:
1590 if (elf_section_data (isec)->relocs != internal_relocs)
1591 free (internal_relocs);
1592 error_ret_free_local:
1593 if (local_syms != NULL
1594 && (symtab_hdr->contents
1595 != (unsigned char *) local_syms)
1600 /* Determine the reloc target section. */
1601 if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
1602 goto error_ret_free_internal;
1604 stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1606 if (stub_type == no_stub)
1608 else if (stub_type == stub_error)
1609 goto error_ret_free_internal;
/* Lazily allocate the per-overlay stub counters on first need.  */
1611 if (htab->stub_count == NULL)
1614 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1615 htab->stub_count = bfd_zmalloc (amt);
1616 if (htab->stub_count == NULL)
1617 goto error_ret_free_internal;
/* Sizing pass: just count.  (Build pass follows below.)  */
1622 if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1623 goto error_ret_free_internal;
/* Build pass: destination is symbol value (global or local) + addend.  */
1630 dest = h->root.u.def.value;
1632 dest = sym->st_value;
1633 dest += irela->r_addend;
1634 if (!build_stub (info, ibfd, isec, stub_type, h, irela,
1636 goto error_ret_free_internal;
1640 /* We're done with the internal relocs, free them. */
1641 if (elf_section_data (isec)->relocs != internal_relocs)
1642 free (internal_relocs);
/* Either free the local syms or cache them on the symtab header,
   depending on --keep-memory.  */
1645 if (local_syms != NULL
1646 && symtab_hdr->contents != (unsigned char *) local_syms
1648 if (!info->keep_memory)
1651 symtab_hdr->contents = (unsigned char *) local_syms;
1658 /* Allocate space for overlay call and return stubs.
1659 Return 0 on error, 1 if no overlays, 2 otherwise. */
/* NOTE(review): elided listing — error returns, `return 1/2' tails and some
   declarations (ibfd, amt, flags, stub, i) are not visible here.  */
1662 spu_elf_size_stubs (struct bfd_link_info *info)
1664 struct spu_link_hash_table *htab;
/* First pass over relocs just counts stubs (build == FALSE).  */
1671 if (!process_stubs (info, FALSE))
1674 htab = spu_hash_table (info);
1675 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
1679 ibfd = info->input_bfds;
1680 if (htab->stub_count != NULL)
/* One stub section for non-overlay code ([0]) plus one per overlay.  */
1682 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1683 htab->stub_sec = bfd_zmalloc (amt);
1684 if (htab->stub_sec == NULL)
1687 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1688 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1689 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1690 htab->stub_sec[0] = stub;
1692 || !bfd_set_section_alignment (ibfd, stub,
1693 ovl_stub_size_log2 (htab->params)))
1695 stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
1696 if (htab->params->ovly_flavour == ovly_soft_icache)
1697 /* Extra space for linked list entries. */
1698 stub->size += htab->stub_count[0] * 16;
1700 for (i = 0; i < htab->num_overlays; ++i)
1702 asection *osec = htab->ovl_sec[i];
1703 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1704 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1705 htab->stub_sec[ovl] = stub;
1707 || !bfd_set_section_alignment (ibfd, stub,
1708 ovl_stub_size_log2 (htab->params)))
1710 stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
1714 if (htab->params->ovly_flavour == ovly_soft_icache)
1716 /* Space for icache manager tables.
1717 a) Tag array, one quadword per cache line.
1718 b) Rewrite "to" list, one quadword per cache line.
1719 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1720 a power-of-two number of full quadwords) per cache line. */
1723 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1724 if (htab->ovtab == NULL
1725 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
/* (16 tag + 16 "to" + fromelem bytes) per cache line.  */
1728 htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
1729 << htab->num_lines_log2;
1731 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1732 htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
1733 if (htab->init == NULL
1734 || !bfd_set_section_alignment (ibfd, htab->init, 4))
1737 htab->init->size = 16;
1739 else if (htab->stub_count == NULL)
/* Classic overlay flavour: _ovly_table plus _ovly_buf_table.  */
1743 /* htab->ovtab consists of two arrays.
1753 . } _ovly_buf_table[];
1756 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1757 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1758 if (htab->ovtab == NULL
1759 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1762 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1765 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1766 if (htab->toe == NULL
1767 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1769 htab->toe->size = 16;
1774 /* Called from ld to place overlay manager data sections. This is done
1775 after the overlay manager itself is loaded, mainly so that the
1776 linker's htab->init section is placed after any other .ovl.init
/* Hands each manager-created section (.stub per overlay, .ovini, .ovtab,
   .toe) to the ld callback that assigns it an output section.
   NOTE(review): elided listing; declaration of `i' and the soft-icache
   ovout assignment line are not visible.  */
1780 spu_elf_place_overlay_data (struct bfd_link_info *info)
1782 struct spu_link_hash_table *htab = spu_hash_table (info);
1785 if (htab->stub_sec != NULL)
/* Non-overlay stubs live with .text; per-overlay stubs with their overlay.  */
1787 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1789 for (i = 0; i < htab->num_overlays; ++i)
1791 asection *osec = htab->ovl_sec[i];
1792 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1793 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1797 if (htab->params->ovly_flavour == ovly_soft_icache)
1798 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1800 if (htab->ovtab != NULL)
1802 const char *ovout = ".data";
1803 if (htab->params->ovly_flavour == ovly_soft_icache)
1805 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1808 if (htab->toe != NULL)
1809 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1812 /* Functions to handle embedded spu_ovl.o object. */
/* iovec "open" hook for the built-in overlay manager; body elided in this
   listing — presumably just returns STREAM.  TODO confirm.  */
1815 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
/* iovec "pread" hook: copy up to COUNT bytes at OFFSET from the in-memory
   overlay-manager image into BUF, clamping at the image end.
   NOTE(review): elided listing — the declarations of count/buf/offset in
   the signature and the final `return count;' are not visible.  */
1821 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1827 struct _ovl_stream *os;
1831 os = (struct _ovl_stream *) stream;
1832 max = (const char *) os->end - (const char *) os->start;
/* Reading at or past the end yields nothing.  */
1834 if ((ufile_ptr) offset >= max)
1838 if (count > max - offset)
1839 count = max - offset;
1841 memcpy (buf, (const char *) os->start + offset, count);
/* iovec "stat" hook: report only the size of the embedded image; all other
   stat fields are zeroed.  */
1846 ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
1850 struct _ovl_stream *os = (struct _ovl_stream *) stream;
1852 memset (sb, 0, sizeof (*sb));
1853 sb->st_size = (const char *) os->end - (const char *) os->start;
/* Open the built-in overlay manager object via the iovec hooks above.
   Returns TRUE on success.  (Hook arguments elided in this listing.)  */
1858 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1860 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1867 return *ovl_bfd != NULL;
/* Return the overlay index of SEC's output section, or (per the elided
   guard) presumably 0 for the absolute/none case — confirm in full source.  */
1871 overlay_index (asection *sec)
1874 || sec->output_section == bfd_abs_section_ptr)
1876 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1879 /* Define an STT_OBJECT symbol. */
/* Create (or validate) NAME in the linker hash table as a symbol defined in
   htab->ovtab.  Errors out if the symbol was already defined by an input
   object or a linker script — overlay-table symbols are linker-owned.
   NOTE(review): elided listing; NULL-check after lookup and the return
   statements are not visible.  */
1881 static struct elf_link_hash_entry *
1882 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1884 struct elf_link_hash_entry *h;
1886 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1890 if (h->root.type != bfd_link_hash_defined
1893 h->root.type = bfd_link_hash_defined;
1894 h->root.u.def.section = htab->ovtab;
1895 h->type = STT_OBJECT;
1898 h->ref_regular_nonweak = 1;
/* Already defined: distinguish "defined by an input file" (owner set)
   from "defined in a linker script".  Both are fatal.  */
1901 else if (h->root.u.def.section->owner != NULL)
1903 /* xgettext:c-format */
1904 _bfd_error_handler (_("%pB is not allowed to define %s"),
1905 h->root.u.def.section->owner,
1906 h->root.root.string);
1907 bfd_set_error (bfd_error_bad_value);
1912 _bfd_error_handler (_("you are not allowed to define %s in a script"),
1913 h->root.root.string);
1914 bfd_set_error (bfd_error_bad_value);
1921 /* Fill in all stubs and the overlay tables. */
/* Second (build) pass: emit stub code, then fill .ovtab with either the
   soft-icache manager tables or the classic _ovly_table/_ovly_buf_table,
   defining the manager's well-known symbols as it goes.
   NOTE(review): heavily elided listing — returns, several guards
   (e.g. `if (h == NULL) return FALSE;' after define_ovtab_symbol) and
   closing braces are not visible.  */
1924 spu_elf_build_stubs (struct bfd_link_info *info)
1926 struct spu_link_hash_table *htab = spu_hash_table (info);
1927 struct elf_link_hash_entry *h;
1933 if (htab->num_overlays != 0)
/* Sanity check: the two overlay-manager entry points must not themselves
   live inside an overlay.  */
1935 for (i = 0; i < 2; i++)
1937 h = htab->ovly_entry[i];
1939 && (h->root.type == bfd_link_hash_defined
1940 || h->root.type == bfd_link_hash_defweak)
1943 s = h->root.u.def.section->output_section;
1944 if (spu_elf_section_data (s)->u.o.ovl_index)
1946 _bfd_error_handler (_("%s in overlay section"),
1947 h->root.root.string);
1948 bfd_set_error (bfd_error_bad_value);
1955 if (htab->stub_sec != NULL)
/* Allocate stub contents; rawsize remembers the sized length so the
   build pass below can be cross-checked against it.  */
1957 for (i = 0; i <= htab->num_overlays; i++)
1958 if (htab->stub_sec[i]->size != 0)
1960 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1961 htab->stub_sec[i]->size);
1962 if (htab->stub_sec[i]->contents == NULL)
1964 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1965 htab->stub_sec[i]->size = 0;
1968 /* Fill in all the stubs. */
1969 process_stubs (info, TRUE);
1970 if (!htab->stub_err)
1971 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1975 _bfd_error_handler (_("overlay stub relocation overflow"));
1976 bfd_set_error (bfd_error_bad_value);
1980 for (i = 0; i <= htab->num_overlays; i++)
1982 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1984 _bfd_error_handler (_("stubs don't match calculated size"));
1985 bfd_set_error (bfd_error_bad_value);
1988 htab->stub_sec[i]->rawsize = 0;
1992 if (htab->ovtab == NULL || htab->ovtab->size == 0)
1995 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1996 if (htab->ovtab->contents == NULL)
1999 p = htab->ovtab->contents;
2000 if (htab->params->ovly_flavour == ovly_soft_icache)
/* Soft-icache layout: tag array, rewrite-to list, rewrite-from list,
   plus absolute symbols describing the cache geometry.  */
2004 h = define_ovtab_symbol (htab, "__icache_tag_array");
2007 h->root.u.def.value = 0;
2008 h->size = 16 << htab->num_lines_log2;
2011 h = define_ovtab_symbol (htab, "__icache_tag_array_size");
2014 h->root.u.def.value = 16 << htab->num_lines_log2;
2015 h->root.u.def.section = bfd_abs_section_ptr;
2017 h = define_ovtab_symbol (htab, "__icache_rewrite_to");
2020 h->root.u.def.value = off;
2021 h->size = 16 << htab->num_lines_log2;
2024 h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
2027 h->root.u.def.value = 16 << htab->num_lines_log2;
2028 h->root.u.def.section = bfd_abs_section_ptr;
2030 h = define_ovtab_symbol (htab, "__icache_rewrite_from");
2033 h->root.u.def.value = off;
2034 h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
2037 h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
2040 h->root.u.def.value = 16 << (htab->fromelem_size_log2
2041 + htab->num_lines_log2);
2042 h->root.u.def.section = bfd_abs_section_ptr;
2044 h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2047 h->root.u.def.value = htab->fromelem_size_log2;
2048 h->root.u.def.section = bfd_abs_section_ptr;
2050 h = define_ovtab_symbol (htab, "__icache_base");
2053 h->root.u.def.value = htab->ovl_sec[0]->vma;
2054 h->root.u.def.section = bfd_abs_section_ptr;
2055 h->size = htab->num_buf << htab->line_size_log2;
2057 h = define_ovtab_symbol (htab, "__icache_linesize");
2060 h->root.u.def.value = 1 << htab->line_size_log2;
2061 h->root.u.def.section = bfd_abs_section_ptr;
2063 h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2066 h->root.u.def.value = htab->line_size_log2;
2067 h->root.u.def.section = bfd_abs_section_ptr;
2069 h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2072 h->root.u.def.value = -htab->line_size_log2;
2073 h->root.u.def.section = bfd_abs_section_ptr;
2075 h = define_ovtab_symbol (htab, "__icache_cachesize");
2078 h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2079 h->root.u.def.section = bfd_abs_section_ptr;
2081 h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2084 h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2085 h->root.u.def.section = bfd_abs_section_ptr;
2087 h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2090 h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2091 h->root.u.def.section = bfd_abs_section_ptr;
2093 if (htab->init != NULL && htab->init->size != 0)
2095 htab->init->contents = bfd_zalloc (htab->init->owner,
2097 if (htab->init->contents == NULL)
2100 h = define_ovtab_symbol (htab, "__icache_fileoff");
2103 h->root.u.def.value = 0;
2104 h->root.u.def.section = htab->init;
/* Classic-overlay branch: emit one 16-byte _ovly_table entry per overlay
   output section.  */
2110 /* Write out _ovly_table. */
2111 /* set low bit of .size to mark non-overlay area as present. */
2113 obfd = htab->ovtab->output_section->owner;
2114 for (s = obfd->sections; s != NULL; s = s->next)
2116 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2120 unsigned long off = ovl_index * 16;
2121 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2123 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2124 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2126 /* file_off written later in spu_elf_modify_program_headers. */
2127 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2131 h = define_ovtab_symbol (htab, "_ovly_table");
2134 h->root.u.def.value = 16;
2135 h->size = htab->num_overlays * 16;
2137 h = define_ovtab_symbol (htab, "_ovly_table_end");
2140 h->root.u.def.value = htab->num_overlays * 16 + 16;
2143 h = define_ovtab_symbol (htab, "_ovly_buf_table");
2146 h->root.u.def.value = htab->num_overlays * 16 + 16;
2147 h->size = htab->num_buf * 4;
2149 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2152 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
/* _EAR_ marks the start of the table-of-effective-addresses section.  */
2156 h = define_ovtab_symbol (htab, "_EAR_");
2159 h->root.u.def.section = htab->toe;
2160 h->root.u.def.value = 0;
2166 /* Check that all loadable section VMAs lie in the range
2167 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
/* Returns the first offending section, or (per the elided tail) presumably
   NULL when everything fits — confirm in full source.  */
2170 spu_elf_check_vma (struct bfd_link_info *info)
2172 struct elf_segment_map *m;
2174 struct spu_link_hash_table *htab = spu_hash_table (info);
2175 bfd *abfd = info->output_bfd;
2176 bfd_vma hi = htab->params->local_store_hi;
2177 bfd_vma lo = htab->params->local_store_lo;
/* Remember the usable local-store size for the auto-overlay machinery.  */
2179 htab->local_store = hi + 1 - lo;
2181 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2182 if (m->p_type == PT_LOAD)
2183 for (i = 0; i < m->count; i++)
2184 if (m->sections[i]->size != 0
2185 && (m->sections[i]->vma < lo
2186 || m->sections[i]->vma > hi
2187 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2188 return m->sections[i];
2193 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2194 Search for stack adjusting insns, and return the sp delta.
2195 If a store of lr is found save the instruction offset to *LR_STORE.
2196 If a stack adjusting instruction is found, save that offset to
/* Lightweight symbolic execution of SPU prologue code: tracks register
   contents in reg[] while decoding raw instruction bytes, until a branch
   ends the prologue.  NOTE(review): elided listing — the reg[] declaration,
   several rt extractions, `continue's, and the final return are not
   visible; byte-pattern comments below reflect only the visible tests.  */
2200 find_function_stack_adjust (asection *sec,
2207 memset (reg, 0, sizeof (reg));
2208 for ( ; offset + 4 <= sec->size; offset += 4)
2210 unsigned char buf[4];
2214 /* Assume no relocs on stack adjusing insns. */
2215 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2219 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2221 if (buf[0] == 0x24 /* stqd */)
/* stqd lr,x(sp): record where the link register is saved.  */
2223 if (rt == 0 /* lr */ && ra == 1 /* sp */)
2228 /* Partly decoded immediate field. */
2229 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2231 if (buf[0] == 0x1c /* ai */)
/* ai: sign-extend the 10-bit immediate and fold into the register model.  */
2234 imm = (imm ^ 0x200) - 0x200;
2235 reg[rt] = reg[ra] + imm;
2237 if (rt == 1 /* sp */)
2241 *sp_adjust = offset;
2245 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2247 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2249 reg[rt] = reg[ra] + reg[rb];
2254 *sp_adjust = offset;
2258 else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2260 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2262 reg[rt] = reg[rb] - reg[ra];
2267 *sp_adjust = offset;
2271 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2273 if (buf[0] >= 0x42 /* ila */)
2274 imm |= (buf[0] & 1) << 17;
2279 if (buf[0] == 0x40 /* il */)
2281 if ((buf[1] & 0x80) == 0)
2283 imm = (imm ^ 0x8000) - 0x8000;
2285 else if ((buf[1] & 0x80) == 0 /* ilhu */)
2291 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2293 reg[rt] |= imm & 0xffff;
2296 else if (buf[0] == 0x04 /* ori */)
2299 imm = (imm ^ 0x200) - 0x200;
2300 reg[rt] = reg[ra] | imm;
2303 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
/* fsmbi: expand the 4 relevant immediate bits into byte masks.  */
2305 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
2306 | ((imm & 0x4000) ? 0x00ff0000 : 0)
2307 | ((imm & 0x2000) ? 0x0000ff00 : 0)
2308 | ((imm & 0x1000) ? 0x000000ff : 0));
2311 else if (buf[0] == 0x16 /* andbi */)
2317 reg[rt] = reg[ra] & imm;
2320 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2322 /* Used in pic reg load. Say rt is trashed. Won't be used
2323 in stack adjust, but we need to continue past this branch. */
2327 else if (is_branch (buf) || is_indirect_branch (buf))
2328 /* If we hit a branch then we must be out of the prologue. */
2335 /* qsort predicate to sort symbols by section and value. */
/* The two statics carry context into the comparator, since qsort passes
   only the element pointers.  Order: section index, then value, then
   descending size (so the enclosing symbol precedes contained zero-size
   ones), then pointer identity as a stable tiebreak.  */
2337 static Elf_Internal_Sym *sort_syms_syms;
2338 static asection **sort_syms_psecs;
2341 sort_syms (const void *a, const void *b)
2343 Elf_Internal_Sym *const *s1 = a;
2344 Elf_Internal_Sym *const *s2 = b;
2345 asection *sec1,*sec2;
2346 bfd_signed_vma delta;
/* Index into the parallel psecs array by symbol position.  */
2348 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2349 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2352 return sec1->index - sec2->index;
2354 delta = (*s1)->st_value - (*s2)->st_value;
2356 return delta < 0 ? -1 : 1;
/* Note reversed operands: larger size sorts first.  */
2358 delta = (*s2)->st_size - (*s1)->st_size;
2360 return delta < 0 ? -1 : 1;
2362 return *s1 < *s2 ? -1 : 1;
2365 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2366 entries for section SEC. */
/* The struct ends with a one-element function_info array, hence the
   (max_fun - 1) sizing below.  Returns NULL on allocation failure.  */
2368 static struct spu_elf_stack_info *
2369 alloc_stack_info (asection *sec, int max_fun)
2371 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2374 amt = sizeof (struct spu_elf_stack_info);
2375 amt += (max_fun - 1) * sizeof (struct function_info);
2376 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2377 if (sec_data->u.i.stack_info != NULL)
2378 sec_data->u.i.stack_info->max_fun = max_fun;
2379 return sec_data->u.i.stack_info;
2382 /* Add a new struct function_info describing a (part of a) function
2383 starting at SYM_H. Keep the array sorted by address. */
/* SYM_H is either an Elf_Internal_Sym* (global == FALSE) or an
   elf_link_hash_entry* (global == TRUE).  Aliases at the same address are
   merged rather than duplicated.  NOTE(review): elided listing — the
   `global' parameter, NULL-check returns, and some brace lines are not
   visible here.  */
2385 static struct function_info *
2386 maybe_insert_function (asection *sec,
2389 bfd_boolean is_func)
2391 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2392 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2398 sinfo = alloc_stack_info (sec, 20);
2405 Elf_Internal_Sym *sym = sym_h;
2406 off = sym->st_value;
2407 size = sym->st_size;
2411 struct elf_link_hash_entry *h = sym_h;
2412 off = h->root.u.def.value;
/* Linear scan from the top; the array is sorted so the first entry with
   lo <= off is the insertion/merge point.  */
2416 for (i = sinfo->num_fun; --i >= 0; )
2417 if (sinfo->fun[i].lo <= off)
2422 /* Don't add another entry for an alias, but do update some
2424 if (sinfo->fun[i].lo == off)
2426 /* Prefer globals over local syms. */
2427 if (global && !sinfo->fun[i].global)
2429 sinfo->fun[i].global = TRUE;
2430 sinfo->fun[i].u.h = sym_h;
2433 sinfo->fun[i].is_func = TRUE;
2434 return &sinfo->fun[i];
2436 /* Ignore a zero-size symbol inside an existing function. */
2437 else if (sinfo->fun[i].hi > off && size == 0)
2438 return &sinfo->fun[i];
/* Array full: grow by 50% plus a constant, zeroing the new tail.  */
2441 if (sinfo->num_fun >= sinfo->max_fun)
2443 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
2444 bfd_size_type old = amt;
2446 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
2447 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
2448 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
2449 sinfo = bfd_realloc (sinfo, amt);
2452 memset ((char *) sinfo + old, 0, amt - old);
2453 sec_data->u.i.stack_info = sinfo;
/* Shift later entries up to keep address order, then fill in the slot.  */
2456 if (++i < sinfo->num_fun)
2457 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
2458 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
2459 sinfo->fun[i].is_func = is_func;
2460 sinfo->fun[i].global = global;
2461 sinfo->fun[i].sec = sec;
2463 sinfo->fun[i].u.h = sym_h;
2465 sinfo->fun[i].u.sym = sym_h;
2466 sinfo->fun[i].lo = off;
2467 sinfo->fun[i].hi = off + size;
2468 sinfo->fun[i].lr_store = -1;
2469 sinfo->fun[i].sp_adjust = -1;
/* Negate: stack grows down, so a negative sp delta is a positive frame.  */
2470 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
2471 &sinfo->fun[i].lr_store,
2472 &sinfo->fun[i].sp_adjust);
2473 sinfo->num_fun += 1;
2474 return &sinfo->fun[i];
2477 /* Return the name of FUN. */
/* Walks to the root piece of a split function, then returns the hash-entry
   name for globals, a synthesized "section+offset" string for nameless
   locals (caller apparently never frees it), or the ELF symbol name.
   NOTE(review): elided listing — sec/ibfd setup and malloc NULL-check are
   not visible.  */
2480 func_name (struct function_info *fun)
2484 Elf_Internal_Shdr *symtab_hdr;
2486 while (fun->start != NULL)
2490 return fun->u.h->root.root.string;
2493 if (fun->u.sym->st_name == 0)
2495 size_t len = strlen (sec->name);
2496 char *name = bfd_malloc (len + 10);
2499 sprintf (name, "%s+%lx", sec->name,
2500 (unsigned long) fun->u.sym->st_value & 0xffffffff);
2504 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2505 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2508 /* Read the instruction at OFF in SEC. Return true iff the instruction
2509 is a nop, lnop, or stop 0 (all zero insn). */
2512 is_nop (asection *sec, bfd_vma off)
2514 unsigned char insn[4];
2516 if (off + 4 > sec->size
2517 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2519 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2521 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2526 /* Extend the range of FUN to cover nop padding up to LIMIT.
2527 Return TRUE iff some instruction other than a NOP was found. */
/* Rounds fun->hi up to instruction alignment, then skips padding nops.
   (Updates of fun->hi and the returns are elided in this listing.)  */
2530 insns_at_end (struct function_info *fun, bfd_vma limit)
2532 bfd_vma off = (fun->hi + 3) & -4;
2534 while (off < limit && is_nop (fun->sec, off))
2545 /* Check and fix overlapping function ranges. Return TRUE iff there
2546 are gaps in the current info we have about functions in SEC. */
/* Overlaps are clipped (earlier function truncated to the start of the
   next) with a warning; gaps between/around known functions set `gaps'.
   NOTE(review): elided listing — the `gaps = TRUE' assignments and the
   final return are not visible.  */
2549 check_function_ranges (asection *sec, struct bfd_link_info *info)
2551 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2552 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2554 bfd_boolean gaps = FALSE;
2559 for (i = 1; i < sinfo->num_fun; i++)
2560 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2562 /* Fix overlapping symbols. */
2563 const char *f1 = func_name (&sinfo->fun[i - 1]);
2564 const char *f2 = func_name (&sinfo->fun[i]);
2566 /* xgettext:c-format */
2567 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2568 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2570 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2573 if (sinfo->num_fun == 0)
2577 if (sinfo->fun[0].lo != 0)
/* Clip the last function to the section, or note trailing insns as a gap.  */
2579 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2581 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2583 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2584 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2586 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2592 /* Search current function info for a function that contains address
2593 OFFSET in section SEC. */
/* Binary search over the sorted fun[] array; reports an error and
   presumably returns NULL when OFFSET is in no known function.
   NOTE(review): elided listing — lo init, the loop header, and the
   lo/hi = mid updates are not visible.  */
2595 static struct function_info *
2596 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2598 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2599 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2603 hi = sinfo->num_fun;
2606 mid = (lo + hi) / 2;
2607 if (offset < sinfo->fun[mid].lo)
2609 else if (offset >= sinfo->fun[mid].hi)
2612 return &sinfo->fun[mid];
2614 /* xgettext:c-format */
2615 info->callbacks->einfo (_("%pA:0x%v not found in function table\n"),
2617 bfd_set_error (bfd_error_bad_value);
2621 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2622 if CALLEE was new. If this function return FALSE, CALLEE should
/* Duplicate edges are merged into the existing entry (tail-call flag
   weakened, counts summed) and moved to the list head (MRU order).
   NOTE(review): elided listing — the *pp unlink, is_tail test guarding the
   start/is_func updates, and the TRUE/FALSE returns are not visible.  */
2626 insert_callee (struct function_info *caller, struct call_info *callee)
2628 struct call_info **pp, *p;
2630 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2631 if (p->fun == callee->fun)
2633 /* Tail calls use less stack than normal calls. Retain entry
2634 for normal call over one for tail call. */
2635 p->is_tail &= callee->is_tail;
2638 p->fun->start = NULL;
2639 p->fun->is_func = TRUE;
2641 p->count += callee->count;
2642 /* Reorder list so most recent call is first. */
2644 p->next = caller->call_list;
2645 caller->call_list = p;
/* New edge: push onto the head of the caller's list.  */
2648 callee->next = caller->call_list;
2649 caller->call_list = callee;
2653 /* Copy CALL and insert the copy into CALLER. */
/* Heap-copies the call_info; if it turned out to be a duplicate edge,
   insert_callee returns FALSE and the copy is (presumably) freed —
   NULL-check and free are elided in this listing.  */
2656 copy_callee (struct function_info *caller, const struct call_info *call)
2658 struct call_info *callee;
2659 callee = bfd_malloc (sizeof (*callee));
2663 if (!insert_callee (caller, callee))
2668 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2669 overlay stub sections. */
/* Stub sections carry SEC_IN_MEMORY, so requiring it to be CLEAR in the
   masked comparison below filters them out while keeping ordinary
   alloc+load+code sections.  */
2672 interesting_section (asection *s)
2674 return (s->output_section != bfd_abs_section_ptr
2675 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2676 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2680 /* Rummage through the relocs for SEC, looking for function calls.
2681 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2682 mark destination symbols on calls as being functions. Also
2683 look at branches, which may be tail calls or go to hot/cold
2684 section part of same function. */
/* NOTE(review): heavily elided listing — the call_tree parameter, several
   `continue' statements, error paths, brace lines, and the reloc-freeing
   tail are not visible here.  */
2687 mark_functions_via_relocs (asection *sec,
2688 struct bfd_link_info *info,
2691 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2692 Elf_Internal_Shdr *symtab_hdr;
2694 unsigned int priority = 0;
2695 static bfd_boolean warned;
2697 if (!interesting_section (sec)
2698 || sec->reloc_count == 0)
2701 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2703 if (internal_relocs == NULL)
2706 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2707 psyms = &symtab_hdr->contents;
2708 irela = internal_relocs;
2709 irelaend = irela + sec->reloc_count;
2710 for (; irela < irelaend; irela++)
2712 enum elf_spu_reloc_type r_type;
2713 unsigned int r_indx;
2715 Elf_Internal_Sym *sym;
2716 struct elf_link_hash_entry *h;
2718 bfd_boolean nonbranch, is_call;
2719 struct function_info *caller;
2720 struct call_info *callee;
2722 r_type = ELF32_R_TYPE (irela->r_info);
/* Only REL16/ADDR16 relocs can sit on branch instructions.  */
2723 nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
2725 r_indx = ELF32_R_SYM (irela->r_info);
2726 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2730 || sym_sec->output_section == bfd_abs_section_ptr)
/* Branch candidate: fetch the insn bytes and decode call vs branch and
   the soft-icache lrlive priority bits.  */
2736 unsigned char insn[4];
2738 if (!bfd_get_section_contents (sec->owner, sec, insn,
2739 irela->r_offset, 4))
2741 if (is_branch (insn))
2743 is_call = (insn[0] & 0xfd) == 0x31;
2744 priority = insn[1] & 0x0f;
2746 priority |= insn[2];
2748 priority |= insn[3];
2750 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2751 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2754 info->callbacks->einfo
2755 /* xgettext:c-format */
2756 (_("%pB(%pA+0x%v): call to non-code section"
2757 " %pB(%pA), analysis incomplete\n"),
2758 sec->owner, sec, irela->r_offset,
2759 sym_sec->owner, sym_sec);
2774 /* For --auto-overlay, count possible stubs we need for
2775 function pointer references. */
2776 unsigned int sym_type;
2780 sym_type = ELF_ST_TYPE (sym->st_info);
2781 if (sym_type == STT_FUNC)
2783 if (call_tree && spu_hash_table (info)->params->auto_overlay)
2784 spu_hash_table (info)->non_ovly_stub += 1;
2785 /* If the symbol type is STT_FUNC then this must be a
2786 function pointer initialisation. */
2789 /* Ignore data references. */
2790 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2791 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2793 /* Otherwise we probably have a jump table reloc for
2794 a switch statement or some other reference to a
/* Destination address = symbol value (global or local) + addend.  */
2799 val = h->root.u.def.value;
2801 val = sym->st_value;
2802 val += irela->r_addend;
/* First (non-call_tree) pass: register the destination as a function,
   synthesizing a fake local sym when the addend lands mid-symbol.  */
2806 struct function_info *fun;
2808 if (irela->r_addend != 0)
2810 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2813 fake->st_value = val;
2815 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2819 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2821 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2824 if (irela->r_addend != 0
2825 && fun->u.sym != sym)
/* Second (call_tree) pass: record the edge caller -> callee.  */
2830 caller = find_function (sec, irela->r_offset, info);
2833 callee = bfd_malloc (sizeof *callee);
2837 callee->fun = find_function (sym_sec, val, info);
2838 if (callee->fun == NULL)
2840 callee->is_tail = !is_call;
2841 callee->is_pasted = FALSE;
2842 callee->broken_cycle = FALSE;
2843 callee->priority = priority;
/* Non-branch (pointer) references don't count as executed calls.  */
2844 callee->count = nonbranch? 0 : 1;
2845 if (callee->fun->last_caller != sec)
2847 callee->fun->last_caller = sec;
2848 callee->fun->call_count += 1;
2850 if (!insert_callee (caller, callee))
2853 && !callee->fun->is_func
2854 && callee->fun->stack == 0)
2856 /* This is either a tail call or a branch from one part of
2857 the function to another, ie. hot/cold section. If the
2858 destination has been called by some other function then
2859 it is a separate function. We also assume that functions
2860 are not split across input files. */
2861 if (sec->owner != sym_sec->owner)
2863 callee->fun->start = NULL;
2864 callee->fun->is_func = TRUE;
2866 else if (callee->fun->start == NULL)
/* Link this piece to the root piece of the caller's function, unless
   they already share a root (self-reference).  */
2868 struct function_info *caller_start = caller;
2869 while (caller_start->start)
2870 caller_start = caller_start->start;
2872 if (caller_start != callee->fun)
2873 callee->fun->start = caller_start;
2877 struct function_info *callee_start;
2878 struct function_info *caller_start;
2879 callee_start = callee->fun;
2880 while (callee_start->start)
2881 callee_start = callee_start->start;
2882 caller_start = caller;
2883 while (caller_start->start)
2884 caller_start = caller_start->start;
/* Pieces rooted in different functions: the destination must really
   be a separate function after all.  */
2885 if (caller_start != callee_start)
2887 callee->fun->start = NULL;
2888 callee->fun->is_func = TRUE;
2897 /* Handle something like .init or .fini, which has a piece of a function.
2898 These sections are pasted together to form a single function. */
2901 pasted_function (asection *sec)
2903 struct bfd_link_order *l;
2904 struct _spu_elf_section_data *sec_data;
2905 struct spu_elf_stack_info *sinfo;
2906 Elf_Internal_Sym *fake;
2907 struct function_info *fun, *fun_start;
/* Synthesize a "fake" symbol spanning the whole section, since a pasted
   piece has no function symbol of its own.  */
2909 fake = bfd_zmalloc (sizeof (*fake));
2913 fake->st_size = sec->size;
2915 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2916 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2920 /* Find a function immediately preceding this section. */
2922 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2924 if (l->u.indirect.section == sec)
2926 if (fun_start != NULL)
/* Tie this piece to its predecessor with an artificial tail-call
   edge flagged is_pasted, so later passes treat the pieces as one
   function.  */
2928 struct call_info *callee = bfd_malloc (sizeof *callee);
2932 fun->start = fun_start;
2934 callee->is_tail = TRUE;
2935 callee->is_pasted = TRUE;
2936 callee->broken_cycle = FALSE;
2937 callee->priority = 0;
2939 if (!insert_callee (fun_start, callee))
/* Track the last function_info of each earlier input section, so the
   lookup above can identify the piece immediately preceding SEC.  */
2945 if (l->type == bfd_indirect_link_order
2946 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2947 && (sinfo = sec_data->u.i.stack_info) != NULL
2948 && sinfo->num_fun != 0)
2949 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2952 /* Don't return an error if we did not find a function preceding this
2953 section. The section may have incorrect flags. */
2957 /* Map address ranges in code sections to functions. */
2960 discover_functions (struct bfd_link_info *info)
2964 Elf_Internal_Sym ***psym_arr;
2965 asection ***sec_arr;
2966 bfd_boolean gaps = FALSE;
/* Per-input-bfd arrays of selected symbols and their sections, kept
   alive across the passes below and freed at the end.  */
2969 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
2972 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2973 if (psym_arr == NULL)
2975 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2976 if (sec_arr == NULL)
/* Pass 1: for each SPU input bfd, read symbols, pick out function-like
   ones, sort them, and record properly typed functions.  */
2979 for (ibfd = info->input_bfds, bfd_idx = 0;
2981 ibfd = ibfd->link.next, bfd_idx++)
2983 extern const bfd_target spu_elf32_vec;
2984 Elf_Internal_Shdr *symtab_hdr;
2987 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2988 asection **psecs, **p;
2990 if (ibfd->xvec != &spu_elf32_vec)
2993 /* Read all the symbols. */
2994 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2995 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2999 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3000 if (interesting_section (sec))
3008 if (symtab_hdr->contents != NULL)
3010 /* Don't use cached symbols since the generic ELF linker
3011 code only reads local symbols, and we need globals too. */
3012 free (symtab_hdr->contents);
3013 symtab_hdr->contents = NULL;
3015 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
3017 symtab_hdr->contents = (void *) syms;
3021 /* Select defined function symbols that are going to be output. */
3022 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
3025 psym_arr[bfd_idx] = psyms;
3026 psecs = bfd_malloc (symcount * sizeof (*psecs));
3029 sec_arr[bfd_idx] = psecs;
/* STT_NOTYPE is accepted here as well as STT_FUNC since some
   hand-written or stripped code lacks proper symbol types.  */
3030 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
3031 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
3032 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3036 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3037 if (s != NULL && interesting_section (s))
3040 symcount = psy - psyms;
3043 /* Sort them by section and offset within section. */
3044 sort_syms_syms = syms;
3045 sort_syms_psecs = psecs;
3046 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3048 /* Now inspect the function symbols. */
3049 for (psy = psyms; psy < psyms + symcount; )
3051 asection *s = psecs[*psy - syms];
3052 Elf_Internal_Sym **psy2;
/* Count the run of symbols belonging to the same section so
   alloc_stack_info can size the per-section function array.  */
3054 for (psy2 = psy; ++psy2 < psyms + symcount; )
3055 if (psecs[*psy2 - syms] != s)
3058 if (!alloc_stack_info (s, psy2 - psy))
3063 /* First install info about properly typed and sized functions.
3064 In an ideal world this will cover all code sections, except
3065 when partitioning functions into hot and cold sections,
3066 and the horrible pasted together .init and .fini functions. */
3067 for (psy = psyms; psy < psyms + symcount; ++psy)
3070 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3072 asection *s = psecs[sy - syms];
3073 if (!maybe_insert_function (s, sy, FALSE, TRUE))
3078 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3079 if (interesting_section (sec))
3080 gaps |= check_function_ranges (sec, info);
/* Pass 2 (only needed when ranges have gaps): discover more function
   entry points from branch relocations.  */
3085 /* See if we can discover more function symbols by looking at
3087 for (ibfd = info->input_bfds, bfd_idx = 0;
3089 ibfd = ibfd->link.next, bfd_idx++)
3093 if (psym_arr[bfd_idx] == NULL)
3096 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3097 if (!mark_functions_via_relocs (sec, info, FALSE))
3101 for (ibfd = info->input_bfds, bfd_idx = 0;
3103 ibfd = ibfd->link.next, bfd_idx++)
3105 Elf_Internal_Shdr *symtab_hdr;
3107 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3110 if ((psyms = psym_arr[bfd_idx]) == NULL)
3113 psecs = sec_arr[bfd_idx];
3115 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3116 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3119 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3120 if (interesting_section (sec))
3121 gaps |= check_function_ranges (sec, info);
3125 /* Finally, install all globals. */
3126 for (psy = psyms; (sy = *psy) != NULL; ++psy)
3130 s = psecs[sy - syms];
3132 /* Global syms might be improperly typed functions. */
3133 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3134 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3136 if (!maybe_insert_function (s, sy, FALSE, FALSE))
/* Pass 3: fix up zero-sized function entries and handle sections with
   no symbols at all (pasted .init/.fini pieces).  */
3142 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3144 extern const bfd_target spu_elf32_vec;
3147 if (ibfd->xvec != &spu_elf32_vec)
3150 /* Some of the symbols we've installed as marking the
3151 beginning of functions may have a size of zero. Extend
3152 the range of such functions to the beginning of the
3153 next symbol of interest. */
3154 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3155 if (interesting_section (sec))
3157 struct _spu_elf_section_data *sec_data;
3158 struct spu_elf_stack_info *sinfo;
3160 sec_data = spu_elf_section_data (sec);
3161 sinfo = sec_data->u.i.stack_info;
3162 if (sinfo != NULL && sinfo->num_fun != 0)
3165 bfd_vma hi = sec->size;
/* Walk backwards so each function's hi bound is the next
   function's lo bound; the first starts at offset 0.  */
3167 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3169 sinfo->fun[fun_idx].hi = hi;
3170 hi = sinfo->fun[fun_idx].lo;
3173 sinfo->fun[0].lo = 0;
3175 /* No symbols in this section. Must be .init or .fini
3176 or something similar. */
3177 else if (!pasted_function (sec))
/* Release the per-bfd symbol/section arrays allocated above.  */
3183 for (ibfd = info->input_bfds, bfd_idx = 0;
3185 ibfd = ibfd->link.next, bfd_idx++)
3187 if (psym_arr[bfd_idx] == NULL)
3190 free (psym_arr[bfd_idx]);
3191 free (sec_arr[bfd_idx]);
3200 /* Iterate over all function_info we have collected, calling DOIT on
3201 each node if ROOT_ONLY is false. Only call DOIT on root nodes
/* Generic visitor over every function_info stored in the per-section
   stack_info arrays of all SPU input bfds.  Stops and propagates
   failure as soon as DOIT returns false.  */
3205 for_each_node (bfd_boolean (*doit) (struct function_info *,
3206 struct bfd_link_info *,
3208 struct bfd_link_info *info,
3214 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3216 extern const bfd_target spu_elf32_vec;
/* Skip non-SPU input files (e.g. linker scripts, other formats).  */
3219 if (ibfd->xvec != &spu_elf32_vec)
3222 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3224 struct _spu_elf_section_data *sec_data;
3225 struct spu_elf_stack_info *sinfo;
3227 if ((sec_data = spu_elf_section_data (sec)) != NULL
3228 && (sinfo = sec_data->u.i.stack_info) != NULL)
3231 for (i = 0; i < sinfo->num_fun; ++i)
3232 if (!root_only || !sinfo->fun[i].non_root)
3233 if (!doit (&sinfo->fun[i], info, param))
3241 /* Transfer call info attached to struct function_info entries for
3242 all of a given function's sections to the first entry. */
3245 transfer_calls (struct function_info *fun,
3246 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3247 void *param ATTRIBUTE_UNUSED)
3249 struct function_info *start = fun->start;
3253 struct call_info *call, *call_next;
/* Follow the start chain to the root entry for this function (the
   first of its hot/cold parts).  */
3255 while (start->start != NULL)
3256 start = start->start;
/* Move every call edge from this part onto the root entry.  Grab
   call->next before insert_callee, which relinks the node.  */
3257 for (call = fun->call_list; call != NULL; call = call_next)
3259 call_next = call->next;
3260 if (!insert_callee (start, call))
3263 fun->call_list = NULL;
3268 /* Mark nodes in the call graph that are called by some other node. */
3271 mark_non_root (struct function_info *fun,
3272 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3273 void *param ATTRIBUTE_UNUSED)
3275 struct call_info *call;
/* Recursively flag every callee as non_root; the nodes left unflagged
   afterwards are the call graph roots.  */
3280 for (call = fun->call_list; call; call = call->next)
3282 call->fun->non_root = TRUE;
3283 mark_non_root (call->fun, 0, 0);
3288 /* Remove cycles from the call graph. Set depth of nodes. */
3291 remove_cycles (struct function_info *fun,
3292 struct bfd_link_info *info,
3295 struct call_info **callp, *call;
3296 unsigned int depth = *(unsigned int *) param;
3297 unsigned int max_depth = depth;
/* Depth-first search.  fun->marking is set while FUN is on the current
   DFS path, so an edge to a marked node is a back edge (a cycle).  */
3301 fun->marking = TRUE;
3303 callp = &fun->call_list;
3304 while ((call = *callp) != NULL)
/* Pasted pieces are the same function, so they don't add depth.  */
3306 call->max_depth = depth + !call->is_pasted;
3307 if (!call->fun->visit2)
3309 if (!remove_cycles (call->fun, info, &call->max_depth))
3311 if (max_depth < call->max_depth)
3312 max_depth = call->max_depth;
3314 else if (call->fun->marking)
3316 struct spu_link_hash_table *htab = spu_hash_table (info);
3318 if (!htab->params->auto_overlay
3319 && htab->params->stack_analysis)
3321 const char *f1 = func_name (fun);
3322 const char *f2 = func_name (call->fun);
3324 /* xgettext:c-format */
3325 info->callbacks->info (_("stack analysis will ignore the call "
/* Break the cycle by flagging this back edge rather than deleting
   it; later passes skip broken_cycle edges.  */
3330 call->broken_cycle = TRUE;
3332 callp = &call->next;
3334 fun->marking = FALSE;
/* Report the deepest call depth found below FUN back to the caller.  */
3335 *(unsigned int *) param = max_depth;
3339 /* Check that we actually visited all nodes in remove_cycles. If we
3340 didn't, then there is some cycle in the call graph not attached to
3341 any root node. Arbitrarily choose a node in the cycle as a new
3342 root and break the cycle. */
3345 mark_detached_root (struct function_info *fun,
3346 struct bfd_link_info *info,
/* Promote this unvisited node to a root and run the cycle-breaking
   DFS from it, restarting with depth 0.  */
3351 fun->non_root = FALSE;
3352 *(unsigned int *) param = 0;
3353 return remove_cycles (fun, info, param);
3356 /* Populate call_list for each function. */
3359 build_call_tree (struct bfd_link_info *info)
3364 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3366 extern const bfd_target spu_elf32_vec;
3369 if (ibfd->xvec != &spu_elf32_vec)
/* Record call edges found via branch relocations in each section;
   TRUE selects call-tree building mode.  */
3372 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3373 if (!mark_functions_via_relocs (sec, info, TRUE))
3377 /* Transfer call info from hot/cold section part of function
3379 if (!spu_hash_table (info)->params->auto_overlay
3380 && !for_each_node (transfer_calls, info, 0, FALSE))
3383 /* Find the call graph root(s). */
3384 if (!for_each_node (mark_non_root, info, 0, FALSE))
3387 /* Remove cycles from the call graph. We start from the root node(s)
3388 so that we break cycles in a reasonable place. */
3390 if (!for_each_node (remove_cycles, info, &depth, TRUE))
/* Finally handle any cycles not reachable from a root.  */
3393 return for_each_node (mark_detached_root, info, &depth, FALSE);
3396 /* qsort predicate to sort calls by priority, max_depth then count. */
3399 sort_calls (const void *a, const void *b)
3401 struct call_info *const *c1 = a;
3402 struct call_info *const *c2 = b;
/* Each key is compared in descending order (c2 minus c1), so higher
   priority / deeper / more-often-counted calls sort first.  */
3405 delta = (*c2)->priority - (*c1)->priority;
3409 delta = (*c2)->max_depth - (*c1)->max_depth;
3413 delta = (*c2)->count - (*c1)->count;
/* Fall back to address order for a deterministic total order when all
   keys tie.  */
3417 return (char *) c1 - (char *) c2;
3421 unsigned int max_overlay_size;
3424 /* Set linker_mark and gc_mark on any sections that we will put in
3425 overlays. These flags are used by the generic ELF linker, but we
3426 won't be continuing on to bfd_elf_final_link so it is OK to use
3427 them. linker_mark is clear before we get here. Set segment_mark
3428 on sections that are part of a pasted function (excluding the last
3431 Set up function rodata section if --overlay-rodata. We don't
3432 currently include merged string constant rodata sections since
3434 Sort the call graph so that the deepest nodes will be visited
3438 mark_overlay_section (struct function_info *fun,
3439 struct bfd_link_info *info,
3442 struct call_info *call;
3444 struct _mos_param *mos_param = param;
3445 struct spu_link_hash_table *htab = spu_hash_table (info);
/* Only mark a section once.  For soft-icache, overlay candidates are
   restricted unless non_ia_text or a recognized section name.  */
3451 if (!fun->sec->linker_mark
3452 && (htab->params->ovly_flavour != ovly_soft_icache
3453 || htab->params->non_ia_text
3454 || strncmp (fun->sec->name, ".text.ia.", 9) == 0
3455 || strcmp (fun->sec->name, ".init") == 0
3456 || strcmp (fun->sec->name, ".fini") == 0))
3460 fun->sec->linker_mark = 1;
3461 fun->sec->gc_mark = 1;
3462 fun->sec->segment_mark = 0;
3463 /* Ensure SEC_CODE is set on this text section (it ought to
3464 be!), and SEC_CODE is clear on rodata sections. We use
3465 this flag to differentiate the two overlay section types. */
3466 fun->sec->flags |= SEC_CODE;
3468 size = fun->sec->size;
3469 if (htab->params->auto_overlay & OVERLAY_RODATA)
3473 /* Find the rodata section corresponding to this function's
/* Map ".text" -> ".rodata", ".text.foo" -> ".rodata.foo", and
   keep ".gnu.linkonce.t.foo"-style names (tweaked below, in lines
   not visible here — TODO confirm exact transformation).  */
3475 if (strcmp (fun->sec->name, ".text") == 0)
3477 name = bfd_malloc (sizeof (".rodata"));
3480 memcpy (name, ".rodata", sizeof (".rodata"));
3482 else if (strncmp (fun->sec->name, ".text.", 6) == 0)
3484 size_t len = strlen (fun->sec->name);
3485 name = bfd_malloc (len + 3);
3488 memcpy (name, ".rodata", sizeof (".rodata"));
3489 memcpy (name + 7, fun->sec->name + 5, len - 4);
3491 else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
3493 size_t len = strlen (fun->sec->name) + 1;
3494 name = bfd_malloc (len);
3497 memcpy (name, fun->sec->name, len);
/* Prefer a rodata section from the same COMDAT group; otherwise
   look it up by name in the owning bfd.  */
3503 asection *rodata = NULL;
3504 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3505 if (group_sec == NULL)
3506 rodata = bfd_get_section_by_name (fun->sec->owner, name);
3508 while (group_sec != NULL && group_sec != fun->sec)
3510 if (strcmp (group_sec->name, name) == 0)
3515 group_sec = elf_section_data (group_sec)->next_in_group;
3517 fun->rodata = rodata;
3520 size += fun->rodata->size;
/* Leave the rodata out if it would push the pair over the
   soft-icache line size.  */
3521 if (htab->params->line_size != 0
3522 && size > htab->params->line_size)
3524 size -= fun->rodata->size;
3529 fun->rodata->linker_mark = 1;
3530 fun->rodata->gc_mark = 1;
3531 fun->rodata->flags &= ~SEC_CODE;
3537 if (mos_param->max_overlay_size < size)
3538 mos_param->max_overlay_size = size;
/* Sort this node's call list (priority/depth/count) via a temporary
   array, rebuilding call_list in sorted order.  */
3541 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3546 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3550 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3551 calls[count++] = call;
3553 qsort (calls, count, sizeof (*calls), sort_calls);
3555 fun->call_list = NULL;
3559 calls[count]->next = fun->call_list;
3560 fun->call_list = calls[count];
3565 for (call = fun->call_list; call != NULL; call = call->next)
3567 if (call->is_pasted)
3569 /* There can only be one is_pasted call per function_info. */
3570 BFD_ASSERT (!fun->sec->segment_mark);
3571 fun->sec->segment_mark = 1;
3573 if (!call->broken_cycle
3574 && !mark_overlay_section (call->fun, info, param))
3578 /* Don't put entry code into an overlay. The overlay manager needs
3579 a stack! Also, don't mark .ovl.init as an overlay. */
3580 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3581 == info->output_bfd->start_address
3582 || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
3584 fun->sec->linker_mark = 0;
3585 if (fun->rodata != NULL)
3586 fun->rodata->linker_mark = 0;
3591 /* If non-zero then unmark functions called from those within sections
3592 that we need to unmark. Unfortunately this isn't reliable since the
3593 call graph cannot know the destination of function pointer calls. */
3594 #define RECURSE_UNMARK 0
3597 asection *exclude_input_section;
3598 asection *exclude_output_section;
3599 unsigned long clearing;
3602 /* Undo some of mark_overlay_section's work. */
3605 unmark_overlay_section (struct function_info *fun,
3606 struct bfd_link_info *info,
3609 struct call_info *call;
3610 struct _uos_param *uos_param = param;
3611 unsigned int excluded = 0;
/* EXCLUDED is 1 when this function lives in (or its output lands in)
   the excluded overlay-manager / interrupt section.  */
3619 if (fun->sec == uos_param->exclude_input_section
3620 || fun->sec->output_section == uos_param->exclude_output_section)
/* With RECURSE_UNMARK, "clearing" propagates down the call tree so
   callees of excluded functions are unmarked too; otherwise only the
   excluded function itself is unmarked.  */
3624 uos_param->clearing += excluded;
3626 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3628 fun->sec->linker_mark = 0;
3630 fun->rodata->linker_mark = 0;
3633 for (call = fun->call_list; call != NULL; call = call->next)
3634 if (!call->broken_cycle
3635 && !unmark_overlay_section (call->fun, info, param))
/* Restore the clearing depth on the way back up the recursion.  */
3639 uos_param->clearing -= excluded;
3644 unsigned int lib_size;
3645 asection **lib_sections;
3648 /* Add sections we have marked as belonging to overlays to an array
3649 for consideration as non-overlay sections. The array consist of
3650 pairs of sections, (text,rodata), for functions in the call graph. */
3653 collect_lib_sections (struct function_info *fun,
3654 struct bfd_link_info *info,
3657 struct _cl_param *lib_param = param;
3658 struct call_info *call;
/* Only consider marked, not-yet-collected, non-pasted sections.  */
3665 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3668 size = fun->sec->size;
3670 size += fun->rodata->size;
/* Append the (text, rodata-or-NULL) pair if it can possibly fit in
   the remaining library space.  gc_mark is cleared to avoid adding a
   section twice.  */
3672 if (size <= lib_param->lib_size)
3674 *lib_param->lib_sections++ = fun->sec;
3675 fun->sec->gc_mark = 0;
3676 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3678 *lib_param->lib_sections++ = fun->rodata;
3679 fun->rodata->gc_mark = 0;
3682 *lib_param->lib_sections++ = NULL;
3685 for (call = fun->call_list; call != NULL; call = call->next)
3686 if (!call->broken_cycle
3687 collect_lib_sections (call->fun, info, param);
3692 /* qsort predicate to sort sections by call count. */
3695 sort_lib (const void *a, const void *b)
3697 asection *const *s1 = a;
3698 asection *const *s2 = b;
3699 struct _spu_elf_section_data *sec_data;
3700 struct spu_elf_stack_info *sinfo;
/* Sum call_count over every function in each section; subtracting for
   s1 and adding for s2 yields descending call-count order (most-called
   sections first).  */
3704 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3705 && (sinfo = sec_data->u.i.stack_info) != NULL)
3708 for (i = 0; i < sinfo->num_fun; ++i)
3709 delta -= sinfo->fun[i].call_count;
3712 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3713 && (sinfo = sec_data->u.i.stack_info) != NULL)
3716 for (i = 0; i < sinfo->num_fun; ++i)
3717 delta += sinfo->fun[i].call_count;
3726 /* Remove some sections from those marked to be in overlays. Choose
3727 those that are called from many places, likely library functions. */
3730 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3733 asection **lib_sections;
3734 unsigned int i, lib_count;
3735 struct _cl_param collect_lib_param;
3736 struct function_info dummy_caller;
3737 struct spu_link_hash_table *htab;
/* dummy_caller accumulates the set of overlay stubs that would be
   needed by the sections accepted so far.  */
3739 memset (&dummy_caller, 0, sizeof (dummy_caller));
/* Count candidate sections: marked code sections small enough to fit
   in the library area at all.  */
3741 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3743 extern const bfd_target spu_elf32_vec;
3746 if (ibfd->xvec != &spu_elf32_vec)
3749 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3750 if (sec->linker_mark
3751 && sec->size < lib_size
3752 && (sec->flags & SEC_CODE) != 0)
/* Two slots per candidate: (text, rodata).  */
3755 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3756 if (lib_sections == NULL)
3757 return (unsigned int) -1;
3758 collect_lib_param.lib_size = lib_size;
3759 collect_lib_param.lib_sections = lib_sections;
3760 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3762 return (unsigned int) -1;
3763 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3765 /* Sort sections so that those with the most calls are first. */
3767 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
/* Greedily accept sections while they (plus their required overlay
   call stubs) still fit in the remaining library space.  */
3769 htab = spu_hash_table (info);
3770 for (i = 0; i < lib_count; i++)
3772 unsigned int tmp, stub_size;
3774 struct _spu_elf_section_data *sec_data;
3775 struct spu_elf_stack_info *sinfo;
3777 sec = lib_sections[2 * i];
3778 /* If this section is OK, its size must be less than lib_size. */
3780 /* If it has a rodata section, then add that too. */
3781 if (lib_sections[2 * i + 1])
3782 tmp += lib_sections[2 * i + 1]->size;
3783 /* Add any new overlay call stubs needed by the section. */
3786 && (sec_data = spu_elf_section_data (sec)) != NULL
3787 && (sinfo = sec_data->u.i.stack_info) != NULL)
3790 struct call_info *call;
/* Count a stub only for callees still marked as overlay and not
   already accounted for in dummy_caller.  */
3792 for (k = 0; k < sinfo->num_fun; ++k)
3793 for (call = sinfo->fun[k].call_list; call; call = call->next)
3794 if (call->fun->sec->linker_mark)
3796 struct call_info *p;
3797 for (p = dummy_caller.call_list; p; p = p->next)
3798 if (p->fun == call->fun)
3801 stub_size += ovl_stub_size (htab->params);
3804 if (tmp + stub_size < lib_size)
3806 struct call_info **pp, *p;
3808 /* This section fits. Mark it as non-overlay. */
3809 lib_sections[2 * i]->linker_mark = 0;
3810 if (lib_sections[2 * i + 1])
3811 lib_sections[2 * i + 1]->linker_mark = 0;
3812 lib_size -= tmp + stub_size;
3813 /* Call stubs to the section we just added are no longer
3815 pp = &dummy_caller.call_list;
3816 while ((p = *pp) != NULL)
3817 if (!p->fun->sec->linker_mark)
3819 lib_size += ovl_stub_size (htab->params);
3825 /* Add new call stubs to dummy_caller. */
3826 if ((sec_data = spu_elf_section_data (sec)) != NULL
3827 && (sinfo = sec_data->u.i.stack_info) != NULL)
3830 struct call_info *call;
3832 for (k = 0; k < sinfo->num_fun; ++k)
3833 for (call = sinfo->fun[k].call_list;
3836 if (call->fun->sec->linker_mark)
3838 struct call_info *callee;
3839 callee = bfd_malloc (sizeof (*callee));
3841 return (unsigned int) -1;
3843 if (!insert_callee (&dummy_caller, callee))
/* Free the temporary stub bookkeeping list.  */
3849 while (dummy_caller.call_list != NULL)
3851 struct call_info *call = dummy_caller.call_list;
3852 dummy_caller.call_list = call->next;
/* Restore gc_mark on every collected section (cleared while
   collecting to avoid duplicates).  */
3855 for (i = 0; i < 2 * lib_count; i++)
3856 if (lib_sections[i])
3857 lib_sections[i]->gc_mark = 1;
3858 free (lib_sections);
3862 /* Build an array of overlay sections. The deepest node's section is
3863 added first, then its parent node's section, then everything called
3864 from the parent section. The idea being to group sections to
3865 minimise calls between different overlays. */
3868 collect_overlays (struct function_info *fun,
3869 struct bfd_link_info *info,
3872 struct call_info *call;
3873 bfd_boolean added_fun;
3874 asection ***ovly_sections = param;
/* Visit the deepest (first) non-pasted callee before adding FUN's own
   section, so deeper sections land earlier in the array.  */
3880 for (call = fun->call_list; call != NULL; call = call->next)
3881 if (!call->is_pasted && !call->broken_cycle)
3883 if (!collect_overlays (call->fun, info, ovly_sections))
/* Append this function's (text, rodata-or-NULL) pair once, clearing
   gc_mark so the section is not added again.  */
3889 if (fun->sec->linker_mark && fun->sec->gc_mark)
3891 fun->sec->gc_mark = 0;
3892 *(*ovly_sections)++ = fun->sec;
3893 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3895 fun->rodata->gc_mark = 0;
3896 *(*ovly_sections)++ = fun->rodata;
3899 *(*ovly_sections)++ = NULL;
3902 /* Pasted sections must stay with the first section. We don't
3903 put pasted sections in the array, just the first section.
3904 Mark subsequent sections as already considered. */
3905 if (fun->sec->segment_mark)
3907 struct function_info *call_fun = fun;
3910 for (call = call_fun->call_list; call != NULL; call = call->next)
3911 if (call->is_pasted)
3913 call_fun = call->fun;
3914 call_fun->sec->gc_mark = 0;
3915 if (call_fun->rodata)
3916 call_fun->rodata->gc_mark = 0;
3922 while (call_fun->sec->segment_mark);
/* Then visit the remaining callees.  */
3926 for (call = fun->call_list; call != NULL; call = call->next)
3927 if (!call->broken_cycle
3928 && !collect_overlays (call->fun, info, ovly_sections))
/* If FUN's section was just added, also collect the other functions
   sharing that section.  */
3933 struct _spu_elf_section_data *sec_data;
3934 struct spu_elf_stack_info *sinfo;
3936 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3937 && (sinfo = sec_data->u.i.stack_info) != NULL)
3940 for (i = 0; i < sinfo->num_fun; ++i)
3941 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
/* Accumulator passed to sum_stack via for_each_node.  */
3949 struct _sum_stack_param {
3951 size_t overall_stack;
3952 bfd_boolean emit_stack_syms;
3955 /* Descend the call graph for FUN, accumulating total stack required. */
3958 sum_stack (struct function_info *fun,
3959 struct bfd_link_info *info,
3962 struct call_info *call;
3963 struct function_info *max;
3964 size_t stack, cum_stack;
3966 bfd_boolean has_call;
3967 struct _sum_stack_param *sum_stack_param = param;
3968 struct spu_link_hash_table *htab;
3970 cum_stack = fun->stack;
3971 sum_stack_param->cum_stack = cum_stack;
/* Take the maximum cumulative stack over all (unbroken) call paths
   from FUN.  */
3977 for (call = fun->call_list; call; call = call->next)
3979 if (call->broken_cycle)
3981 if (!call->is_pasted)
3983 if (!sum_stack (call->fun, info, sum_stack_param))
3985 stack = sum_stack_param->cum_stack;
3986 /* Include caller stack for normal calls, don't do so for
3987 tail calls. fun->stack here is local stack usage for
3989 if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3990 stack += fun->stack;
3991 if (cum_stack < stack)
3998 sum_stack_param->cum_stack = cum_stack;
4000 /* Now fun->stack holds cumulative stack. */
4001 fun->stack = cum_stack;
4005 && sum_stack_param->overall_stack < cum_stack)
4006 sum_stack_param->overall_stack = cum_stack;
4008 htab = spu_hash_table (info);
4009 if (htab->params->auto_overlay)
/* Optional --stack-analysis report: per-function stack, plus the
   call list with "*" marking the worst-path callee and "t" marking
   tail calls.  */
4012 f1 = func_name (fun);
4013 if (htab->params->stack_analysis)
4016 info->callbacks->info (" %s: 0x%v\n", f1, (bfd_vma) cum_stack);
4017 info->callbacks->minfo ("%s: 0x%v 0x%v\n",
4018 f1, (bfd_vma) stack, (bfd_vma) cum_stack);
4022 info->callbacks->minfo (_(" calls:\n"));
4023 for (call = fun->call_list; call; call = call->next)
4024 if (!call->is_pasted && !call->broken_cycle)
4026 const char *f2 = func_name (call->fun);
4027 const char *ann1 = call->fun == max ? "*" : " ";
4028 const char *ann2 = call->is_tail ? "t" : " ";
4030 info->callbacks->minfo (" %s%s %s\n", ann1, ann2, f2);
/* Optionally define absolute __stack_<func> symbols carrying the
   cumulative stack value; local functions get a section-id-qualified
   name to avoid clashes.  */
4035 if (sum_stack_param->emit_stack_syms)
4037 char *name = bfd_malloc (18 + strlen (f1));
4038 struct elf_link_hash_entry *h;
4043 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
4044 sprintf (name, "__stack_%s", f1);
4046 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
4048 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
4051 && (h->root.type == bfd_link_hash_new
4052 || h->root.type == bfd_link_hash_undefined
4053 || h->root.type == bfd_link_hash_undefweak))
4055 h->root.type = bfd_link_hash_defined;
4056 h->root.u.def.section = bfd_abs_section_ptr;
4057 h->root.u.def.value = cum_stack;
4062 h->ref_regular_nonweak = 1;
4063 h->forced_local = 1;
4071 /* SEC is part of a pasted function. Return the call_info for the
4072 next section of this function. */
4074 static struct call_info *
4075 find_pasted_call (asection *sec)
4077 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4078 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4079 struct call_info *call;
/* Scan every function in the section for its (unique) is_pasted call
   edge, which links to the next pasted piece.  */
4082 for (k = 0; k < sinfo->num_fun; ++k)
4083 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4084 if (call->is_pasted)
4090 /* qsort predicate to sort bfds by file name. */
4093 sort_bfds (const void *a, const void *b)
4095 bfd *const *abfd1 = a;
4096 bfd *const *abfd2 = b;
/* filename_cmp compares per host conventions (case/slash handling).  */
4098 return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
/* Emit linker-script input statements for one overlay region: first the
   text sections assigned to overlay OVLYNUM (including any pasted
   continuation sections), then the matching rodata sections.  Archive
   members are written as "archive<sep>member (section)".  */
4102 print_one_overlay_section (FILE *script,
4105 unsigned int ovlynum,
4106 unsigned int *ovly_map,
4107 asection **ovly_sections,
4108 struct bfd_link_info *info)
/* Text sections: even-indexed entries of the (text, rodata) pairs.  */
4112 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4114 asection *sec = ovly_sections[2 * j];
4116 if (fprintf (script, " %s%c%s (%s)\n",
4117 (sec->owner->my_archive != NULL
4118 ? sec->owner->my_archive->filename : ""),
4119 info->path_separator,
4120 sec->owner->filename,
/* Follow pasted-function chains so all pieces stay in this overlay.  */
4123 if (sec->segment_mark)
4125 struct call_info *call = find_pasted_call (sec);
4126 while (call != NULL)
4128 struct function_info *call_fun = call->fun;
4129 sec = call_fun->sec;
4130 if (fprintf (script, " %s%c%s (%s)\n",
4131 (sec->owner->my_archive != NULL
4132 ? sec->owner->my_archive->filename : ""),
4133 info->path_separator,
4134 sec->owner->filename,
4137 for (call = call_fun->call_list; call; call = call->next)
4138 if (call->is_pasted)
/* Rodata sections: odd-indexed entries, written after all text.  */
4144 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4146 asection *sec = ovly_sections[2 * j + 1];
4148 && fprintf (script, " %s%c%s (%s)\n",
4149 (sec->owner->my_archive != NULL
4150 ? sec->owner->my_archive->filename : ""),
4151 info->path_separator,
4152 sec->owner->filename,
4156 sec = ovly_sections[2 * j];
4157 if (sec->segment_mark)
4159 struct call_info *call = find_pasted_call (sec);
4160 while (call != NULL)
4162 struct function_info *call_fun = call->fun;
4163 sec = call_fun->rodata;
4165 && fprintf (script, " %s%c%s (%s)\n",
4166 (sec->owner->my_archive != NULL
4167 ? sec->owner->my_archive->filename : ""),
4168 info->path_separator,
4169 sec->owner->filename,
4172 for (call = call_fun->call_list; call; call = call->next)
4173 if (call->is_pasted)
4182 /* Handle --auto-overlay. */
4185 spu_elf_auto_overlay (struct bfd_link_info *info)
4189 struct elf_segment_map *m;
4190 unsigned int fixed_size, lo, hi;
4191 unsigned int reserved;
4192 struct spu_link_hash_table *htab;
4193 unsigned int base, i, count, bfd_count;
4194 unsigned int region, ovlynum;
4195 asection **ovly_sections, **ovly_p;
4196 unsigned int *ovly_map;
4198 unsigned int total_overlay_size, overlay_size;
4199 const char *ovly_mgr_entry;
4200 struct elf_link_hash_entry *h;
4201 struct _mos_param mos_param;
4202 struct _uos_param uos_param;
4203 struct function_info dummy_caller;
4205 /* Find the extents of our loadable image. */
4206 lo = (unsigned int) -1;
4208 for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
4209 if (m->p_type == PT_LOAD)
4210 for (i = 0; i < m->count; i++)
4211 if (m->sections[i]->size != 0)
4213 if (m->sections[i]->vma < lo)
4214 lo = m->sections[i]->vma;
4215 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4216 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4218 fixed_size = hi + 1 - lo;
4220 if (!discover_functions (info))
4223 if (!build_call_tree (info))
4226 htab = spu_hash_table (info);
4227 reserved = htab->params->auto_overlay_reserved;
4230 struct _sum_stack_param sum_stack_param;
4232 sum_stack_param.emit_stack_syms = 0;
4233 sum_stack_param.overall_stack = 0;
4234 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4236 reserved = (sum_stack_param.overall_stack
4237 + htab->params->extra_stack_space);
4240 /* No need for overlays if everything already fits. */
4241 if (fixed_size + reserved <= htab->local_store
4242 && htab->params->ovly_flavour != ovly_soft_icache)
4244 htab->params->auto_overlay = 0;
4248 uos_param.exclude_input_section = 0;
4249 uos_param.exclude_output_section
4250 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4252 ovly_mgr_entry = "__ovly_load";
4253 if (htab->params->ovly_flavour == ovly_soft_icache)
4254 ovly_mgr_entry = "__icache_br_handler";
4255 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4256 FALSE, FALSE, FALSE);
4258 && (h->root.type == bfd_link_hash_defined
4259 || h->root.type == bfd_link_hash_defweak)
4262 /* We have a user supplied overlay manager. */
4263 uos_param.exclude_input_section = h->root.u.def.section;
4267 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4268 builtin version to .text, and will adjust .text size. */
4269 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4272 /* Mark overlay sections, and find max overlay section size. */
4273 mos_param.max_overlay_size = 0;
4274 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4277 /* We can't put the overlay manager or interrupt routines in
4279 uos_param.clearing = 0;
4280 if ((uos_param.exclude_input_section
4281 || uos_param.exclude_output_section)
4282 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4286 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4288 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4289 if (bfd_arr == NULL)
4292 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4295 total_overlay_size = 0;
4296 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4298 extern const bfd_target spu_elf32_vec;
4300 unsigned int old_count;
4302 if (ibfd->xvec != &spu_elf32_vec)
4306 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4307 if (sec->linker_mark)
4309 if ((sec->flags & SEC_CODE) != 0)
4311 fixed_size -= sec->size;
4312 total_overlay_size += sec->size;
4314 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4315 && sec->output_section->owner == info->output_bfd
4316 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4317 fixed_size -= sec->size;
4318 if (count != old_count)
4319 bfd_arr[bfd_count++] = ibfd;
4322 /* Since the overlay link script selects sections by file name and
4323 section name, ensure that file names are unique. */
4326 bfd_boolean ok = TRUE;
4328 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4329 for (i = 1; i < bfd_count; ++i)
4330 if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4332 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4334 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4335 /* xgettext:c-format */
4336 info->callbacks->einfo (_("%s duplicated in %s\n"),
4337 bfd_arr[i]->filename,
4338 bfd_arr[i]->my_archive->filename);
4340 info->callbacks->einfo (_("%s duplicated\n"),
4341 bfd_arr[i]->filename);
4347 info->callbacks->einfo (_("sorry, no support for duplicate "
4348 "object files in auto-overlay script\n"));
4349 bfd_set_error (bfd_error_bad_value);
4355 fixed_size += reserved;
4356 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4357 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4359 if (htab->params->ovly_flavour == ovly_soft_icache)
4361 /* Stubs in the non-icache area are bigger. */
4362 fixed_size += htab->non_ovly_stub * 16;
4363 /* Space for icache manager tables.
4364 a) Tag array, one quadword per cache line.
4365 - word 0: ia address of present line, init to zero. */
4366 fixed_size += 16 << htab->num_lines_log2;
4367 /* b) Rewrite "to" list, one quadword per cache line. */
4368 fixed_size += 16 << htab->num_lines_log2;
4369 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4370 to a power-of-two number of full quadwords) per cache line. */
4371 fixed_size += 16 << (htab->fromelem_size_log2
4372 + htab->num_lines_log2);
4373 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4378 /* Guess number of overlays. Assuming overlay buffer is on
4379 average only half full should be conservative. */
4380 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4381 / (htab->local_store - fixed_size));
4382 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4383 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4387 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4388 /* xgettext:c-format */
4389 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4390 "size of 0x%v exceeds local store\n"),
4391 (bfd_vma) fixed_size,
4392 (bfd_vma) mos_param.max_overlay_size);
4394 /* Now see if we should put some functions in the non-overlay area. */
4395 else if (fixed_size < htab->params->auto_overlay_fixed)
4397 unsigned int max_fixed, lib_size;
4399 max_fixed = htab->local_store - mos_param.max_overlay_size;
4400 if (max_fixed > htab->params->auto_overlay_fixed)
4401 max_fixed = htab->params->auto_overlay_fixed;
4402 lib_size = max_fixed - fixed_size;
4403 lib_size = auto_ovl_lib_functions (info, lib_size);
4404 if (lib_size == (unsigned int) -1)
4406 fixed_size = max_fixed - lib_size;
4409 /* Build an array of sections, suitably sorted to place into
4411 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4412 if (ovly_sections == NULL)
4414 ovly_p = ovly_sections;
4415 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4417 count = (size_t) (ovly_p - ovly_sections) / 2;
4418 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4419 if (ovly_map == NULL)
4422 memset (&dummy_caller, 0, sizeof (dummy_caller));
4423 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4424 if (htab->params->line_size != 0)
4425 overlay_size = htab->params->line_size;
4428 while (base < count)
4430 unsigned int size = 0, rosize = 0, roalign = 0;
4432 for (i = base; i < count; i++)
4434 asection *sec, *rosec;
4435 unsigned int tmp, rotmp;
4436 unsigned int num_stubs;
4437 struct call_info *call, *pasty;
4438 struct _spu_elf_section_data *sec_data;
4439 struct spu_elf_stack_info *sinfo;
4442 /* See whether we can add this section to the current
4443 overlay without overflowing our overlay buffer. */
4444 sec = ovly_sections[2 * i];
4445 tmp = align_power (size, sec->alignment_power) + sec->size;
4447 rosec = ovly_sections[2 * i + 1];
4450 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4451 if (roalign < rosec->alignment_power)
4452 roalign = rosec->alignment_power;
4454 if (align_power (tmp, roalign) + rotmp > overlay_size)
4456 if (sec->segment_mark)
4458 /* Pasted sections must stay together, so add their
4460 pasty = find_pasted_call (sec);
4461 while (pasty != NULL)
4463 struct function_info *call_fun = pasty->fun;
4464 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4465 + call_fun->sec->size);
4466 if (call_fun->rodata)
4468 rotmp = (align_power (rotmp,
4469 call_fun->rodata->alignment_power)
4470 + call_fun->rodata->size);
4471 if (roalign < rosec->alignment_power)
4472 roalign = rosec->alignment_power;
4474 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4475 if (pasty->is_pasted)
4479 if (align_power (tmp, roalign) + rotmp > overlay_size)
4482 /* If we add this section, we might need new overlay call
4483 stubs. Add any overlay section calls to dummy_call. */
4485 sec_data = spu_elf_section_data (sec);
4486 sinfo = sec_data->u.i.stack_info;
4487 for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4488 for (call = sinfo->fun[k].call_list; call; call = call->next)
4489 if (call->is_pasted)
4491 BFD_ASSERT (pasty == NULL);
4494 else if (call->fun->sec->linker_mark)
4496 if (!copy_callee (&dummy_caller, call))
4499 while (pasty != NULL)
4501 struct function_info *call_fun = pasty->fun;
4503 for (call = call_fun->call_list; call; call = call->next)
4504 if (call->is_pasted)
4506 BFD_ASSERT (pasty == NULL);
4509 else if (!copy_callee (&dummy_caller, call))
4513 /* Calculate call stub size. */
4515 for (call = dummy_caller.call_list; call; call = call->next)
4517 unsigned int stub_delta = 1;
4519 if (htab->params->ovly_flavour == ovly_soft_icache)
4520 stub_delta = call->count;
4521 num_stubs += stub_delta;
4523 /* If the call is within this overlay, we won't need a
4525 for (k = base; k < i + 1; k++)
4526 if (call->fun->sec == ovly_sections[2 * k])
4528 num_stubs -= stub_delta;
4532 if (htab->params->ovly_flavour == ovly_soft_icache
4533 && num_stubs > htab->params->max_branch)
4535 if (align_power (tmp, roalign) + rotmp
4536 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4544 /* xgettext:c-format */
4545 info->callbacks->einfo (_("%pB:%pA%s exceeds overlay size\n"),
4546 ovly_sections[2 * i]->owner,
4547 ovly_sections[2 * i],
4548 ovly_sections[2 * i + 1] ? " + rodata" : "");
4549 bfd_set_error (bfd_error_bad_value);
4553 while (dummy_caller.call_list != NULL)
4555 struct call_info *call = dummy_caller.call_list;
4556 dummy_caller.call_list = call->next;
4562 ovly_map[base++] = ovlynum;
4565 script = htab->params->spu_elf_open_overlay_script ();
4567 if (htab->params->ovly_flavour == ovly_soft_icache)
4569 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4572 if (fprintf (script,
4573 " . = ALIGN (%u);\n"
4574 " .ovl.init : { *(.ovl.init) }\n"
4575 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4576 htab->params->line_size) <= 0)
4581 while (base < count)
4583 unsigned int indx = ovlynum - 1;
4584 unsigned int vma, lma;
4586 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4587 lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4589 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4590 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4591 ovlynum, vma, lma) <= 0)
4594 base = print_one_overlay_section (script, base, count, ovlynum,
4595 ovly_map, ovly_sections, info);
4596 if (base == (unsigned) -1)
4599 if (fprintf (script, " }\n") <= 0)
4605 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4606 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4609 if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4614 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4617 if (fprintf (script,
4618 " . = ALIGN (16);\n"
4619 " .ovl.init : { *(.ovl.init) }\n"
4620 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4623 for (region = 1; region <= htab->params->num_lines; region++)
4627 while (base < count && ovly_map[base] < ovlynum)
4635 /* We need to set lma since we are overlaying .ovl.init. */
4636 if (fprintf (script,
4637 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4642 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4646 while (base < count)
4648 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4651 base = print_one_overlay_section (script, base, count, ovlynum,
4652 ovly_map, ovly_sections, info);
4653 if (base == (unsigned) -1)
4656 if (fprintf (script, " }\n") <= 0)
4659 ovlynum += htab->params->num_lines;
4660 while (base < count && ovly_map[base] < ovlynum)
4664 if (fprintf (script, " }\n") <= 0)
4668 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4673 free (ovly_sections);
4675 if (fclose (script) != 0)
4678 if (htab->params->auto_overlay & AUTO_RELINK)
4679 (*htab->params->spu_elf_relink) ();
4684 bfd_set_error (bfd_error_system_call);
4686 info->callbacks->einfo (_("%F%P: auto overlay error: %E\n"));
4690 /* Provide an estimate of total stack required. */
4693 spu_elf_stack_analysis (struct bfd_link_info *info)
4695 struct spu_link_hash_table *htab;
4696 struct _sum_stack_param sum_stack_param;
4698 if (!discover_functions (info))
4701 if (!build_call_tree (info))
4704 htab = spu_hash_table (info);
4705 if (htab->params->stack_analysis)
4707 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4708 info->callbacks->minfo (_("\nStack size for functions. "
4709 "Annotations: '*' max stack, 't' tail call\n"));
4712 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4713 sum_stack_param.overall_stack = 0;
4714 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4717 if (htab->params->stack_analysis)
4718 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4719 (bfd_vma) sum_stack_param.overall_stack);
4723 /* Perform a final link. */
4726 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4728 struct spu_link_hash_table *htab = spu_hash_table (info);
4730 if (htab->params->auto_overlay)
4731 spu_elf_auto_overlay (info);
4733 if ((htab->params->stack_analysis
4734 || (htab->params->ovly_flavour == ovly_soft_icache
4735 && htab->params->lrlive_analysis))
4736 && !spu_elf_stack_analysis (info))
4737 info->callbacks->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));
4739 if (!spu_elf_build_stubs (info))
4740 info->callbacks->einfo (_("%F%P: can not build overlay stubs: %E\n"));
4742 return bfd_elf_final_link (output_bfd, info);
4745 /* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
4746 and !info->emitrelocations. Returns a count of special relocs
4747 that need to be emitted. */
4750 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4752 Elf_Internal_Rela *relocs;
4753 unsigned int count = 0;
4755 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4759 Elf_Internal_Rela *rel;
4760 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4762 for (rel = relocs; rel < relend; rel++)
4764 int r_type = ELF32_R_TYPE (rel->r_info);
4765 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4769 if (elf_section_data (sec)->relocs != relocs)
4776 /* Functions for adding fixup records to .fixup */
/* Each fixup record is a single 32-bit word: the quadword address in
   the upper 28 bits plus a 4-bit mask of words needing relocation.  */
#define FIXUP_RECORD_SIZE 4

/* Store fixup word ADDR at record INDEX in the .fixup section.  */
#define FIXUP_PUT(output_bfd,htab,index,addr) \
  bfd_put_32 (output_bfd, addr, \
	      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
/* Fetch the fixup word at record INDEX in the .fixup section.  */
#define FIXUP_GET(output_bfd,htab,index) \
  bfd_get_32 (output_bfd, \
	      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4787 /* Store OFFSET in .fixup. This assumes it will be called with an
4788 increasing OFFSET. When this OFFSET fits with the last base offset,
4789 it just sets a bit, otherwise it adds a new fixup record. */
4791 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4794 struct spu_link_hash_table *htab = spu_hash_table (info);
4795 asection *sfixup = htab->sfixup;
4796 bfd_vma qaddr = offset & ~(bfd_vma) 15;
4797 bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4798 if (sfixup->reloc_count == 0)
4800 FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4801 sfixup->reloc_count++;
4805 bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4806 if (qaddr != (base & ~(bfd_vma) 15))
4808 if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4809 _bfd_error_handler (_("fatal error while creating .fixup"));
4810 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4811 sfixup->reloc_count++;
4814 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4818 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4821 spu_elf_relocate_section (bfd *output_bfd,
4822 struct bfd_link_info *info,
4824 asection *input_section,
4826 Elf_Internal_Rela *relocs,
4827 Elf_Internal_Sym *local_syms,
4828 asection **local_sections)
4830 Elf_Internal_Shdr *symtab_hdr;
4831 struct elf_link_hash_entry **sym_hashes;
4832 Elf_Internal_Rela *rel, *relend;
4833 struct spu_link_hash_table *htab;
4836 bfd_boolean emit_these_relocs = FALSE;
4837 bfd_boolean is_ea_sym;
4839 unsigned int iovl = 0;
4841 htab = spu_hash_table (info);
4842 stubs = (htab->stub_sec != NULL
4843 && maybe_needs_stubs (input_section));
4844 iovl = overlay_index (input_section);
4845 ea = bfd_get_section_by_name (output_bfd, "._ea");
4846 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4847 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4850 relend = relocs + input_section->reloc_count;
4851 for (; rel < relend; rel++)
4854 reloc_howto_type *howto;
4855 unsigned int r_symndx;
4856 Elf_Internal_Sym *sym;
4858 struct elf_link_hash_entry *h;
4859 const char *sym_name;
4862 bfd_reloc_status_type r;
4863 bfd_boolean unresolved_reloc;
4864 enum _stub_type stub_type;
4866 r_symndx = ELF32_R_SYM (rel->r_info);
4867 r_type = ELF32_R_TYPE (rel->r_info);
4868 howto = elf_howto_table + r_type;
4869 unresolved_reloc = FALSE;
4873 if (r_symndx < symtab_hdr->sh_info)
4875 sym = local_syms + r_symndx;
4876 sec = local_sections[r_symndx];
4877 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4878 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4882 if (sym_hashes == NULL)
4885 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4887 if (info->wrap_hash != NULL
4888 && (input_section->flags & SEC_DEBUGGING) != 0)
4889 h = ((struct elf_link_hash_entry *)
4890 unwrap_hash_lookup (info, input_bfd, &h->root));
4892 while (h->root.type == bfd_link_hash_indirect
4893 || h->root.type == bfd_link_hash_warning)
4894 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4897 if (h->root.type == bfd_link_hash_defined
4898 || h->root.type == bfd_link_hash_defweak)
4900 sec = h->root.u.def.section;
4902 || sec->output_section == NULL)
4903 /* Set a flag that will be cleared later if we find a
4904 relocation value for this symbol. output_section
4905 is typically NULL for symbols satisfied by a shared
4907 unresolved_reloc = TRUE;
4909 relocation = (h->root.u.def.value
4910 + sec->output_section->vma
4911 + sec->output_offset);
4913 else if (h->root.type == bfd_link_hash_undefweak)
4915 else if (info->unresolved_syms_in_objects == RM_IGNORE
4916 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4918 else if (!bfd_link_relocatable (info)
4919 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4922 err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4923 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4924 (*info->callbacks->undefined_symbol) (info,
4925 h->root.root.string,
4928 rel->r_offset, err);
4930 sym_name = h->root.root.string;
4933 if (sec != NULL && discarded_section (sec))
4934 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4935 rel, 1, relend, howto, 0, contents);
4937 if (bfd_link_relocatable (info))
4940 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4941 if (r_type == R_SPU_ADD_PIC
4943 && !(h->def_regular || ELF_COMMON_DEF_P (h)))
4945 bfd_byte *loc = contents + rel->r_offset;
4951 is_ea_sym = (ea != NULL
4953 && sec->output_section == ea);
4955 /* If this symbol is in an overlay area, we may need to relocate
4956 to the overlay stub. */
4957 addend = rel->r_addend;
4960 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4961 contents, info)) != no_stub)
4963 unsigned int ovl = 0;
4964 struct got_entry *g, **head;
4966 if (stub_type != nonovl_stub)
4970 head = &h->got.glist;
4972 head = elf_local_got_ents (input_bfd) + r_symndx;
4974 for (g = *head; g != NULL; g = g->next)
4975 if (htab->params->ovly_flavour == ovly_soft_icache
4977 && g->br_addr == (rel->r_offset
4978 + input_section->output_offset
4979 + input_section->output_section->vma))
4980 : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4985 relocation = g->stub_addr;
4990 /* For soft icache, encode the overlay index into addresses. */
4991 if (htab->params->ovly_flavour == ovly_soft_icache
4992 && (r_type == R_SPU_ADDR16_HI
4993 || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
4996 unsigned int ovl = overlay_index (sec);
4999 unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
5000 relocation += set_id << 18;
5005 if (htab->params->emit_fixups && !bfd_link_relocatable (info)
5006 && (input_section->flags & SEC_ALLOC) != 0
5007 && r_type == R_SPU_ADDR32)
5010 offset = rel->r_offset + input_section->output_section->vma
5011 + input_section->output_offset;
5012 spu_elf_emit_fixup (output_bfd, info, offset);
5015 if (unresolved_reloc)
5017 else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5021 /* ._ea is a special section that isn't allocated in SPU
5022 memory, but rather occupies space in PPU memory as
5023 part of an embedded ELF image. If this reloc is
5024 against a symbol defined in ._ea, then transform the
5025 reloc into an equivalent one without a symbol
5026 relative to the start of the ELF image. */
5027 rel->r_addend += (relocation
5029 + elf_section_data (ea)->this_hdr.sh_offset);
5030 rel->r_info = ELF32_R_INFO (0, r_type);
5032 emit_these_relocs = TRUE;
5036 unresolved_reloc = TRUE;
5038 if (unresolved_reloc
5039 && _bfd_elf_section_offset (output_bfd, info, input_section,
5040 rel->r_offset) != (bfd_vma) -1)
5043 /* xgettext:c-format */
5044 (_("%pB(%s+%#" PRIx64 "): "
5045 "unresolvable %s relocation against symbol `%s'"),
5047 bfd_get_section_name (input_bfd, input_section),
5048 (uint64_t) rel->r_offset,
5054 r = _bfd_final_link_relocate (howto,
5058 rel->r_offset, relocation, addend);
5060 if (r != bfd_reloc_ok)
5062 const char *msg = (const char *) 0;
5066 case bfd_reloc_overflow:
5067 (*info->callbacks->reloc_overflow)
5068 (info, (h ? &h->root : NULL), sym_name, howto->name,
5069 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
5072 case bfd_reloc_undefined:
5073 (*info->callbacks->undefined_symbol)
5074 (info, sym_name, input_bfd, input_section, rel->r_offset, TRUE);
5077 case bfd_reloc_outofrange:
5078 msg = _("internal error: out of range error");
5081 case bfd_reloc_notsupported:
5082 msg = _("internal error: unsupported relocation error");
5085 case bfd_reloc_dangerous:
5086 msg = _("internal error: dangerous error");
5090 msg = _("internal error: unknown error");
5095 (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
5096 input_section, rel->r_offset);
5103 && emit_these_relocs
5104 && !info->emitrelocations)
5106 Elf_Internal_Rela *wrel;
5107 Elf_Internal_Shdr *rel_hdr;
5109 wrel = rel = relocs;
5110 relend = relocs + input_section->reloc_count;
5111 for (; rel < relend; rel++)
5115 r_type = ELF32_R_TYPE (rel->r_info);
5116 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5119 input_section->reloc_count = wrel - relocs;
5120 /* Backflips for _bfd_elf_link_output_relocs. */
5121 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5122 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
5130 spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
5131 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5136 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5139 spu_elf_output_symbol_hook (struct bfd_link_info *info,
5140 const char *sym_name ATTRIBUTE_UNUSED,
5141 Elf_Internal_Sym *sym,
5142 asection *sym_sec ATTRIBUTE_UNUSED,
5143 struct elf_link_hash_entry *h)
5145 struct spu_link_hash_table *htab = spu_hash_table (info);
5147 if (!bfd_link_relocatable (info)
5148 && htab->stub_sec != NULL
5150 && (h->root.type == bfd_link_hash_defined
5151 || h->root.type == bfd_link_hash_defweak)
5153 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5155 struct got_entry *g;
5157 for (g = h->got.glist; g != NULL; g = g->next)
5158 if (htab->params->ovly_flavour == ovly_soft_icache
5159 ? g->br_addr == g->stub_addr
5160 : g->addend == 0 && g->ovl == 0)
5162 sym->st_shndx = (_bfd_elf_section_from_bfd_section
5163 (htab->stub_sec[0]->output_section->owner,
5164 htab->stub_sec[0]->output_section));
5165 sym->st_value = g->stub_addr;
/* Nonzero when the SPU image is being built as a PIC plugin to be
   embedded in a PowerPC ELF.  */
static int spu_plugin = 0;

/* Record whether to mark output as a plugin (sets e_type to ET_DYN).  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5181 /* Set ELF header e_type for plugins. */
5184 spu_elf_post_process_headers (bfd *abfd, struct bfd_link_info *info)
5188 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5190 i_ehdrp->e_type = ET_DYN;
5193 _bfd_elf_post_process_headers (abfd, info);
5196 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5197 segments for overlays. */
5200 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5207 struct spu_link_hash_table *htab = spu_hash_table (info);
5208 extra = htab->num_overlays;
5214 sec = bfd_get_section_by_name (abfd, ".toe");
5215 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5221 /* Remove .toe section from other PT_LOAD segments and put it in
5222 a segment of its own. Put overlays in separate segments too. */
5225 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
5228 struct elf_segment_map *m, *m_overlay;
5229 struct elf_segment_map **p, **p_overlay;
5235 toe = bfd_get_section_by_name (abfd, ".toe");
5236 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
5237 if (m->p_type == PT_LOAD && m->count > 1)
5238 for (i = 0; i < m->count; i++)
5239 if ((s = m->sections[i]) == toe
5240 || spu_elf_section_data (s)->u.o.ovl_index != 0)
5242 struct elf_segment_map *m2;
5245 if (i + 1 < m->count)
5247 amt = sizeof (struct elf_segment_map);
5248 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
5249 m2 = bfd_zalloc (abfd, amt);
5252 m2->count = m->count - (i + 1);
5253 memcpy (m2->sections, m->sections + i + 1,
5254 m2->count * sizeof (m->sections[0]));
5255 m2->p_type = PT_LOAD;
5263 amt = sizeof (struct elf_segment_map);
5264 m2 = bfd_zalloc (abfd, amt);
5267 m2->p_type = PT_LOAD;
5269 m2->sections[0] = s;
5277 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5278 PT_LOAD segments. This can cause the .ovl.init section to be
5279 overwritten with the contents of some overlay segment. To work
5280 around this issue, we ensure that all PF_OVERLAY segments are
5281 sorted first amongst the program headers; this ensures that even
5282 with a broken loader, the .ovl.init section (which is not marked
5283 as PF_OVERLAY) will be placed into SPU local store on startup. */
5285 /* Move all overlay segments onto a separate list. */
5286 p = &elf_seg_map (abfd);
5287 p_overlay = &m_overlay;
5290 if ((*p)->p_type == PT_LOAD && (*p)->count == 1
5291 && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
5296 p_overlay = &m->next;
5303 /* Re-insert overlay segments at the head of the segment map. */
5304 *p_overlay = elf_seg_map (abfd);
5305 elf_seg_map (abfd) = m_overlay;
5310 /* Tweak the section type of .note.spu_name. */
5313 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5314 Elf_Internal_Shdr *hdr,
5317 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5318 hdr->sh_type = SHT_NOTE;
5322 /* Tweak phdrs before writing them out. */
5325 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
5327 const struct elf_backend_data *bed;
5328 struct elf_obj_tdata *tdata;
5329 Elf_Internal_Phdr *phdr, *last;
5330 struct spu_link_hash_table *htab;
5337 bed = get_elf_backend_data (abfd);
5338 tdata = elf_tdata (abfd);
5340 count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
5341 htab = spu_hash_table (info);
5342 if (htab->num_overlays != 0)
5344 struct elf_segment_map *m;
5347 for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
5349 && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
5351 /* Mark this as an overlay header. */
5352 phdr[i].p_flags |= PF_OVERLAY;
5354 if (htab->ovtab != NULL && htab->ovtab->size != 0
5355 && htab->params->ovly_flavour != ovly_soft_icache)
5357 bfd_byte *p = htab->ovtab->contents;
5358 unsigned int off = o * 16 + 8;
5360 /* Write file_off into _ovly_table. */
5361 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
5364 /* Soft-icache has its file offset put in .ovl.init. */
5365 if (htab->init != NULL && htab->init->size != 0)
5367 bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;
5369 bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
5373 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5374 of 16. This should always be possible when using the standard
5375 linker scripts, but don't create overlapping segments if
5376 someone is playing games with linker scripts. */
5378 for (i = count; i-- != 0; )
5379 if (phdr[i].p_type == PT_LOAD)
5383 adjust = -phdr[i].p_filesz & 15;
5386 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
5389 adjust = -phdr[i].p_memsz & 15;
5392 && phdr[i].p_filesz != 0
5393 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
5394 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
5397 if (phdr[i].p_filesz != 0)
5401 if (i == (unsigned int) -1)
5402 for (i = count; i-- != 0; )
5403 if (phdr[i].p_type == PT_LOAD)
5407 adjust = -phdr[i].p_filesz & 15;
5408 phdr[i].p_filesz += adjust;
5410 adjust = -phdr[i].p_memsz & 15;
5411 phdr[i].p_memsz += adjust;
5418 spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
5420 struct spu_link_hash_table *htab = spu_hash_table (info);
5421 if (htab->params->emit_fixups)
5423 asection *sfixup = htab->sfixup;
5424 int fixup_count = 0;
5428 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
5432 if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
5435 /* Walk over each section attached to the input bfd. */
5436 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
5438 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5441 /* If there aren't any relocs, then there's nothing more
5443 if ((isec->flags & SEC_ALLOC) == 0
5444 || (isec->flags & SEC_RELOC) == 0
5445 || isec->reloc_count == 0)
5448 /* Get the relocs. */
5450 _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
5452 if (internal_relocs == NULL)
5455 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5456 relocations. They are stored in a single word by
5457 saving the upper 28 bits of the address and setting the
5458 lower 4 bits to a bit mask of the words that have the
5459 relocation. BASE_END keeps track of the next quadword. */
5460 irela = internal_relocs;
5461 irelaend = irela + isec->reloc_count;
5463 for (; irela < irelaend; irela++)
5464 if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
5465 && irela->r_offset >= base_end)
5467 base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
5473 /* We always have a NULL fixup as a sentinel */
5474 size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
5475 if (!bfd_set_section_size (output_bfd, sfixup, size))
5477 sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
5478 if (sfixup->contents == NULL)
5484 #define TARGET_BIG_SYM spu_elf32_vec
5485 #define TARGET_BIG_NAME "elf32-spu"
5486 #define ELF_ARCH bfd_arch_spu
5487 #define ELF_TARGET_ID SPU_ELF_DATA
5488 #define ELF_MACHINE_CODE EM_SPU
5489 /* This matches the alignment need for DMA. */
5490 #define ELF_MAXPAGESIZE 0x80
5491 #define elf_backend_rela_normal 1
5492 #define elf_backend_can_gc_sections 1
5494 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5495 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5496 #define elf_info_to_howto spu_elf_info_to_howto
5497 #define elf_backend_count_relocs spu_elf_count_relocs
5498 #define elf_backend_relocate_section spu_elf_relocate_section
5499 #define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
5500 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5501 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5502 #define elf_backend_object_p spu_elf_object_p
5503 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5504 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5506 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5507 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5508 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5509 #define elf_backend_post_process_headers spu_elf_post_process_headers
5510 #define elf_backend_fake_sections spu_elf_fake_sections
5511 #define elf_backend_special_sections spu_elf_special_sections
5512 #define bfd_elf32_bfd_final_link spu_elf_final_link
5514 #include "elf32-target.h"