1 /* SPU specific support for 32-bit ELF
3 Copyright (C) 2006-2017 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table[] = {
40 HOWTO (R_SPU_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
41 bfd_elf_generic_reloc, "SPU_NONE",
42 FALSE, 0, 0x00000000, FALSE),
43 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
44 bfd_elf_generic_reloc, "SPU_ADDR10",
45 FALSE, 0, 0x00ffc000, FALSE),
46 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
47 bfd_elf_generic_reloc, "SPU_ADDR16",
48 FALSE, 0, 0x007fff80, FALSE),
49 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
50 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
51 FALSE, 0, 0x007fff80, FALSE),
52 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
53 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
54 FALSE, 0, 0x007fff80, FALSE),
55 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
56 bfd_elf_generic_reloc, "SPU_ADDR18",
57 FALSE, 0, 0x01ffff80, FALSE),
58 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "SPU_ADDR32",
60 FALSE, 0, 0xffffffff, FALSE),
61 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "SPU_REL16",
63 FALSE, 0, 0x007fff80, TRUE),
64 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
65 bfd_elf_generic_reloc, "SPU_ADDR7",
66 FALSE, 0, 0x001fc000, FALSE),
67 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
68 spu_elf_rel9, "SPU_REL9",
69 FALSE, 0, 0x0180007f, TRUE),
70 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
71 spu_elf_rel9, "SPU_REL9I",
72 FALSE, 0, 0x0000c07f, TRUE),
73 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
74 bfd_elf_generic_reloc, "SPU_ADDR10I",
75 FALSE, 0, 0x00ffc000, FALSE),
76 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
77 bfd_elf_generic_reloc, "SPU_ADDR16I",
78 FALSE, 0, 0x007fff80, FALSE),
79 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
80 bfd_elf_generic_reloc, "SPU_REL32",
81 FALSE, 0, 0xffffffff, TRUE),
82 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "SPU_ADDR16X",
84 FALSE, 0, 0x007fff80, FALSE),
85 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
86 bfd_elf_generic_reloc, "SPU_PPU32",
87 FALSE, 0, 0xffffffff, FALSE),
88 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
89 bfd_elf_generic_reloc, "SPU_PPU64",
91 HOWTO (R_SPU_ADD_PIC, 0, 0, 0, FALSE, 0, complain_overflow_dont,
92 bfd_elf_generic_reloc, "SPU_ADD_PIC",
93 FALSE, 0, 0x00000000, FALSE),
96 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
97 { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
98 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
108 return (enum elf_spu_reloc_type) -1;
111 case BFD_RELOC_SPU_IMM10W:
113 case BFD_RELOC_SPU_IMM16W:
115 case BFD_RELOC_SPU_LO16:
116 return R_SPU_ADDR16_LO;
117 case BFD_RELOC_SPU_HI16:
118 return R_SPU_ADDR16_HI;
119 case BFD_RELOC_SPU_IMM18:
121 case BFD_RELOC_SPU_PCREL16:
123 case BFD_RELOC_SPU_IMM7:
125 case BFD_RELOC_SPU_IMM8:
127 case BFD_RELOC_SPU_PCREL9a:
129 case BFD_RELOC_SPU_PCREL9b:
131 case BFD_RELOC_SPU_IMM10:
132 return R_SPU_ADDR10I;
133 case BFD_RELOC_SPU_IMM16:
134 return R_SPU_ADDR16I;
137 case BFD_RELOC_32_PCREL:
139 case BFD_RELOC_SPU_PPU32:
141 case BFD_RELOC_SPU_PPU64:
143 case BFD_RELOC_SPU_ADD_PIC:
144 return R_SPU_ADD_PIC;
149 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
151 Elf_Internal_Rela *dst)
153 enum elf_spu_reloc_type r_type;
155 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
156 /* PR 17512: file: 90c2a92e. */
157 if (r_type >= R_SPU_max)
159 /* xgettext:c-format */
160 _bfd_error_handler (_("%B: unrecognised SPU reloc number: %d"),
162 bfd_set_error (bfd_error_bad_value);
165 cache_ptr->howto = &elf_howto_table[(int) r_type];
168 static reloc_howto_type *
169 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
170 bfd_reloc_code_real_type code)
172 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
174 if (r_type == (enum elf_spu_reloc_type) -1)
177 return elf_howto_table + r_type;
180 static reloc_howto_type *
181 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
186 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
187 if (elf_howto_table[i].name != NULL
188 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
189 return &elf_howto_table[i];
194 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
196 static bfd_reloc_status_type
197 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
198 void *data, asection *input_section,
199 bfd *output_bfd, char **error_message)
201 bfd_size_type octets;
205 /* If this is a relocatable link (output_bfd test tells us), just
206 call the generic function. Any adjustment will be done at final
208 if (output_bfd != NULL)
209 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
210 input_section, output_bfd, error_message);
212 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
213 return bfd_reloc_outofrange;
214 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
216 /* Get symbol value. */
218 if (!bfd_is_com_section (symbol->section))
220 if (symbol->section->output_section)
221 val += symbol->section->output_section->vma;
223 val += reloc_entry->addend;
225 /* Make it pc-relative. */
226 val -= input_section->output_section->vma + input_section->output_offset;
229 if (val + 256 >= 512)
230 return bfd_reloc_overflow;
232 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
234 /* Move two high bits of value to REL9I and REL9 position.
235 The mask will take care of selecting the right field. */
236 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
237 insn &= ~reloc_entry->howto->dst_mask;
238 insn |= val & reloc_entry->howto->dst_mask;
239 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
244 spu_elf_new_section_hook (bfd *abfd, asection *sec)
246 if (!sec->used_by_bfd)
248 struct _spu_elf_section_data *sdata;
250 sdata = bfd_zalloc (abfd, sizeof (*sdata));
253 sec->used_by_bfd = sdata;
256 return _bfd_elf_new_section_hook (abfd, sec);
259 /* Set up overlay info for executables. */
262 spu_elf_object_p (bfd *abfd)
264 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
266 unsigned int i, num_ovl, num_buf;
267 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
268 Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
269 Elf_Internal_Phdr *last_phdr = NULL;
271 for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
272 if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
277 if (last_phdr == NULL
278 || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
281 for (j = 1; j < elf_numsections (abfd); j++)
283 Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
285 if (ELF_SECTION_SIZE (shdr, phdr) != 0
286 && ELF_SECTION_IN_SEGMENT (shdr, phdr))
288 asection *sec = shdr->bfd_section;
289 spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
290 spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
298 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
299 strip --strip-unneeded will not remove them. */
302 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
304 if (sym->name != NULL
305 && sym->section != bfd_abs_section_ptr
306 && strncmp (sym->name, "_EAR_", 5) == 0)
307 sym->flags |= BSF_KEEP;
310 /* SPU ELF linker hash table. */
312 struct spu_link_hash_table
314 struct elf_link_hash_table elf;
316 struct spu_elf_params *params;
318 /* Shortcuts to overlay sections. */
324 /* Count of stubs in each overlay section. */
325 unsigned int *stub_count;
327 /* The stub section for each overlay section. */
330 struct elf_link_hash_entry *ovly_entry[2];
332 /* Number of overlay buffers. */
333 unsigned int num_buf;
335 /* Total number of overlays. */
336 unsigned int num_overlays;
338 /* For soft icache. */
339 unsigned int line_size_log2;
340 unsigned int num_lines_log2;
341 unsigned int fromelem_size_log2;
343 /* How much memory we have. */
344 unsigned int local_store;
346 /* Count of overlay stubs needed in non-overlay area. */
347 unsigned int non_ovly_stub;
349 /* Pointer to the fixup section */
353 unsigned int stub_err : 1;
356 /* Hijack the generic got fields for overlay stub accounting. */
360 struct got_entry *next;
369 #define spu_hash_table(p) \
370 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
371 == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
375 struct function_info *fun;
376 struct call_info *next;
378 unsigned int max_depth;
379 unsigned int is_tail : 1;
380 unsigned int is_pasted : 1;
381 unsigned int broken_cycle : 1;
382 unsigned int priority : 13;
387 /* List of functions called. Also branches to hot/cold part of
389 struct call_info *call_list;
390 /* For hot/cold part of function, point to owner. */
391 struct function_info *start;
392 /* Symbol at start of function. */
394 Elf_Internal_Sym *sym;
395 struct elf_link_hash_entry *h;
397 /* Function section. */
400 /* Where last called from, and number of sections called from. */
401 asection *last_caller;
402 unsigned int call_count;
403 /* Address range of (this part of) function. */
405 /* Offset where we found a store of lr, or -1 if none found. */
407 /* Offset where we found the stack adjustment insn. */
411 /* Distance from root of call tree. Tail and hot/cold branches
412 count as one deeper. We aren't counting stack frames here. */
414 /* Set if global symbol. */
415 unsigned int global : 1;
416 /* Set if known to be start of function (as distinct from a hunk
417 in hot/cold section. */
418 unsigned int is_func : 1;
419 /* Set if not a root node. */
420 unsigned int non_root : 1;
421 /* Flags used during call tree traversal. It's cheaper to replicate
422 the visit flags than have one which needs clearing after a traversal. */
423 unsigned int visit1 : 1;
424 unsigned int visit2 : 1;
425 unsigned int marking : 1;
426 unsigned int visit3 : 1;
427 unsigned int visit4 : 1;
428 unsigned int visit5 : 1;
429 unsigned int visit6 : 1;
430 unsigned int visit7 : 1;
433 struct spu_elf_stack_info
437 /* Variable size array describing functions, one per contiguous
438 address range belonging to a function. */
439 struct function_info fun[1];
442 static struct function_info *find_function (asection *, bfd_vma,
443 struct bfd_link_info *);
445 /* Create a spu ELF linker hash table. */
447 static struct bfd_link_hash_table *
448 spu_elf_link_hash_table_create (bfd *abfd)
450 struct spu_link_hash_table *htab;
452 htab = bfd_zmalloc (sizeof (*htab));
456 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
457 _bfd_elf_link_hash_newfunc,
458 sizeof (struct elf_link_hash_entry),
465 htab->elf.init_got_refcount.refcount = 0;
466 htab->elf.init_got_refcount.glist = NULL;
467 htab->elf.init_got_offset.offset = 0;
468 htab->elf.init_got_offset.glist = NULL;
469 return &htab->elf.root;
473 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
475 bfd_vma max_branch_log2;
477 struct spu_link_hash_table *htab = spu_hash_table (info);
478 htab->params = params;
479 htab->line_size_log2 = bfd_log2 (htab->params->line_size);
480 htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
482 /* For the software i-cache, we provide a "from" list whose size
483 is a power-of-two number of quadwords, big enough to hold one
484 byte per outgoing branch. Compute this number here. */
485 max_branch_log2 = bfd_log2 (htab->params->max_branch);
486 htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
489 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
490 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
491 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
494 get_sym_h (struct elf_link_hash_entry **hp,
495 Elf_Internal_Sym **symp,
497 Elf_Internal_Sym **locsymsp,
498 unsigned long r_symndx,
501 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
503 if (r_symndx >= symtab_hdr->sh_info)
505 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
506 struct elf_link_hash_entry *h;
508 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
509 while (h->root.type == bfd_link_hash_indirect
510 || h->root.type == bfd_link_hash_warning)
511 h = (struct elf_link_hash_entry *) h->root.u.i.link;
521 asection *symsec = NULL;
522 if (h->root.type == bfd_link_hash_defined
523 || h->root.type == bfd_link_hash_defweak)
524 symsec = h->root.u.def.section;
530 Elf_Internal_Sym *sym;
531 Elf_Internal_Sym *locsyms = *locsymsp;
535 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
537 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
539 0, NULL, NULL, NULL);
544 sym = locsyms + r_symndx;
553 *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
559 /* Create the note section if not already present. This is done early so
560 that the linker maps the sections to the right place in the output. */
563 spu_elf_create_sections (struct bfd_link_info *info)
565 struct spu_link_hash_table *htab = spu_hash_table (info);
568 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
569 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
574 /* Make SPU_PTNOTE_SPUNAME section. */
581 ibfd = info->input_bfds;
582 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
583 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
585 || !bfd_set_section_alignment (ibfd, s, 4))
588 name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
589 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
590 size += (name_len + 3) & -4;
592 if (!bfd_set_section_size (ibfd, s, size))
595 data = bfd_zalloc (ibfd, size);
599 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
600 bfd_put_32 (ibfd, name_len, data + 4);
601 bfd_put_32 (ibfd, 1, data + 8);
602 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
603 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
604 bfd_get_filename (info->output_bfd), name_len);
608 if (htab->params->emit_fixups)
613 if (htab->elf.dynobj == NULL)
614 htab->elf.dynobj = ibfd;
615 ibfd = htab->elf.dynobj;
616 flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
617 | SEC_IN_MEMORY | SEC_LINKER_CREATED);
618 s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
619 if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
627 /* qsort predicate to sort sections by vma. */
630 sort_sections (const void *a, const void *b)
632 const asection *const *s1 = a;
633 const asection *const *s2 = b;
634 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
637 return delta < 0 ? -1 : 1;
639 return (*s1)->index - (*s2)->index;
642 /* Identify overlays in the output bfd, and number them.
643 Returns 0 on error, 1 if no overlays, 2 if overlays. */
646 spu_elf_find_overlays (struct bfd_link_info *info)
648 struct spu_link_hash_table *htab = spu_hash_table (info);
649 asection **alloc_sec;
650 unsigned int i, n, ovl_index, num_buf;
653 static const char *const entry_names[2][2] = {
654 { "__ovly_load", "__icache_br_handler" },
655 { "__ovly_return", "__icache_call_handler" }
658 if (info->output_bfd->section_count < 2)
662 = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
663 if (alloc_sec == NULL)
666 /* Pick out all the alloced sections. */
667 for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
668 if ((s->flags & SEC_ALLOC) != 0
669 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
679 /* Sort them by vma. */
680 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
682 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
683 if (htab->params->ovly_flavour == ovly_soft_icache)
685 unsigned int prev_buf = 0, set_id = 0;
687 /* Look for an overlapping vma to find the first overlay section. */
688 bfd_vma vma_start = 0;
690 for (i = 1; i < n; i++)
693 if (s->vma < ovl_end)
695 asection *s0 = alloc_sec[i - 1];
699 << (htab->num_lines_log2 + htab->line_size_log2)));
704 ovl_end = s->vma + s->size;
707 /* Now find any sections within the cache area. */
708 for (ovl_index = 0, num_buf = 0; i < n; i++)
711 if (s->vma >= ovl_end)
714 /* A section in an overlay area called .ovl.init is not
715 an overlay, in the sense that it might be loaded in
716 by the overlay manager, but rather the initial
717 section contents for the overlay buffer. */
718 if (strncmp (s->name, ".ovl.init", 9) != 0)
720 num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
721 set_id = (num_buf == prev_buf)? set_id + 1 : 0;
724 if ((s->vma - vma_start) & (htab->params->line_size - 1))
726 info->callbacks->einfo (_("%X%P: overlay section %A "
727 "does not start on a cache line.\n"),
729 bfd_set_error (bfd_error_bad_value);
732 else if (s->size > htab->params->line_size)
734 info->callbacks->einfo (_("%X%P: overlay section %A "
735 "is larger than a cache line.\n"),
737 bfd_set_error (bfd_error_bad_value);
741 alloc_sec[ovl_index++] = s;
742 spu_elf_section_data (s)->u.o.ovl_index
743 = (set_id << htab->num_lines_log2) + num_buf;
744 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
748 /* Ensure there are no more overlay sections. */
752 if (s->vma < ovl_end)
754 info->callbacks->einfo (_("%X%P: overlay section %A "
755 "is not in cache area.\n"),
757 bfd_set_error (bfd_error_bad_value);
761 ovl_end = s->vma + s->size;
766 /* Look for overlapping vmas. Any with overlap must be overlays.
767 Count them. Also count the number of overlay regions. */
768 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
771 if (s->vma < ovl_end)
773 asection *s0 = alloc_sec[i - 1];
775 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
778 if (strncmp (s0->name, ".ovl.init", 9) != 0)
780 alloc_sec[ovl_index] = s0;
781 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
782 spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
785 ovl_end = s->vma + s->size;
787 if (strncmp (s->name, ".ovl.init", 9) != 0)
789 alloc_sec[ovl_index] = s;
790 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
791 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
792 if (s0->vma != s->vma)
794 /* xgettext:c-format */
795 info->callbacks->einfo (_("%X%P: overlay sections %A "
796 "and %A do not start at the "
799 bfd_set_error (bfd_error_bad_value);
802 if (ovl_end < s->vma + s->size)
803 ovl_end = s->vma + s->size;
807 ovl_end = s->vma + s->size;
811 htab->num_overlays = ovl_index;
812 htab->num_buf = num_buf;
813 htab->ovl_sec = alloc_sec;
818 for (i = 0; i < 2; i++)
821 struct elf_link_hash_entry *h;
823 name = entry_names[i][htab->params->ovly_flavour];
824 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
828 if (h->root.type == bfd_link_hash_new)
830 h->root.type = bfd_link_hash_undefined;
832 h->ref_regular_nonweak = 1;
835 htab->ovly_entry[i] = h;
841 /* Non-zero to use bra in overlay stubs rather than br. */
844 #define BRA 0x30000000
845 #define BRASL 0x31000000
846 #define BR 0x32000000
847 #define BRSL 0x33000000
848 #define NOP 0x40200000
849 #define LNOP 0x00200000
850 #define ILA 0x42000000
852 /* Return true for all relative and absolute branch instructions.
860 brhnz 00100011 0.. */
863 is_branch (const unsigned char *insn)
865 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
868 /* Return true for all indirect branch instructions.
876 bihnz 00100101 011 */
879 is_indirect_branch (const unsigned char *insn)
881 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
884 /* Return true for branch hint instructions.
889 is_hint (const unsigned char *insn)
891 return (insn[0] & 0xfc) == 0x10;
894 /* True if INPUT_SECTION might need overlay stubs. */
897 maybe_needs_stubs (asection *input_section)
899 /* No stubs for debug sections and suchlike. */
900 if ((input_section->flags & SEC_ALLOC) == 0)
903 /* No stubs for link-once sections that will be discarded. */
904 if (input_section->output_section == bfd_abs_section_ptr)
907 /* Don't create stubs for .eh_frame references. */
908 if (strcmp (input_section->name, ".eh_frame") == 0)
930 /* Return non-zero if this reloc symbol should go via an overlay stub.
931 Return 2 if the stub must be in non-overlay area. */
933 static enum _stub_type
934 needs_ovl_stub (struct elf_link_hash_entry *h,
935 Elf_Internal_Sym *sym,
937 asection *input_section,
938 Elf_Internal_Rela *irela,
940 struct bfd_link_info *info)
942 struct spu_link_hash_table *htab = spu_hash_table (info);
943 enum elf_spu_reloc_type r_type;
944 unsigned int sym_type;
945 bfd_boolean branch, hint, call;
946 enum _stub_type ret = no_stub;
950 || sym_sec->output_section == bfd_abs_section_ptr
951 || spu_elf_section_data (sym_sec->output_section) == NULL)
956 /* Ensure no stubs for user supplied overlay manager syms. */
957 if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
960 /* setjmp always goes via an overlay stub, because then the return
961 and hence the longjmp goes via __ovly_return. That magically
962 makes setjmp/longjmp between overlays work. */
963 if (strncmp (h->root.root.string, "setjmp", 6) == 0
964 && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
971 sym_type = ELF_ST_TYPE (sym->st_info);
973 r_type = ELF32_R_TYPE (irela->r_info);
977 if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
979 if (contents == NULL)
982 if (!bfd_get_section_contents (input_section->owner,
989 contents += irela->r_offset;
991 branch = is_branch (contents);
992 hint = is_hint (contents);
995 call = (contents[0] & 0xfd) == 0x31;
997 && sym_type != STT_FUNC
1000 /* It's common for people to write assembly and forget
1001 to give function symbols the right type. Handle
1002 calls to such symbols, but warn so that (hopefully)
1003 people will fix their code. We need the symbol
1004 type to be correct to distinguish function pointer
1005 initialisation from other pointer initialisations. */
1006 const char *sym_name;
1009 sym_name = h->root.root.string;
1012 Elf_Internal_Shdr *symtab_hdr;
1013 symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
1014 sym_name = bfd_elf_sym_name (input_section->owner,
1020 /* xgettext:c-format */
1021 (_("warning: call to non-function symbol %s defined in %B"),
1022 sym_name, sym_sec->owner);
1028 if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
1029 || (sym_type != STT_FUNC
1030 && !(branch || hint)
1031 && (sym_sec->flags & SEC_CODE) == 0))
1034 /* Usually, symbols in non-overlay sections don't need stubs. */
1035 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
1036 && !htab->params->non_overlay_stubs)
1039 /* A reference from some other section to a symbol in an overlay
1040 section needs a stub. */
1041 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
1042 != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
1044 unsigned int lrlive = 0;
1046 lrlive = (contents[1] & 0x70) >> 4;
1048 if (!lrlive && (call || sym_type == STT_FUNC))
1049 ret = call_ovl_stub;
1051 ret = br000_ovl_stub + lrlive;
1054 /* If this insn isn't a branch then we are possibly taking the
1055 address of a function and passing it out somehow. Soft-icache code
1056 always generates inline code to do indirect branches. */
1057 if (!(branch || hint)
1058 && sym_type == STT_FUNC
1059 && htab->params->ovly_flavour != ovly_soft_icache)
1066 count_stub (struct spu_link_hash_table *htab,
1069 enum _stub_type stub_type,
1070 struct elf_link_hash_entry *h,
1071 const Elf_Internal_Rela *irela)
1073 unsigned int ovl = 0;
1074 struct got_entry *g, **head;
1077 /* If this instruction is a branch or call, we need a stub
1078 for it. One stub per function per overlay.
1079 If it isn't a branch, then we are taking the address of
1080 this function so need a stub in the non-overlay area
1081 for it. One stub per function. */
1082 if (stub_type != nonovl_stub)
1083 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1086 head = &h->got.glist;
1089 if (elf_local_got_ents (ibfd) == NULL)
1091 bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
1092 * sizeof (*elf_local_got_ents (ibfd)));
1093 elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
1094 if (elf_local_got_ents (ibfd) == NULL)
1097 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1100 if (htab->params->ovly_flavour == ovly_soft_icache)
1102 htab->stub_count[ovl] += 1;
1108 addend = irela->r_addend;
1112 struct got_entry *gnext;
1114 for (g = *head; g != NULL; g = g->next)
1115 if (g->addend == addend && g->ovl == 0)
1120 /* Need a new non-overlay area stub. Zap other stubs. */
1121 for (g = *head; g != NULL; g = gnext)
1124 if (g->addend == addend)
1126 htab->stub_count[g->ovl] -= 1;
1134 for (g = *head; g != NULL; g = g->next)
1135 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1141 g = bfd_malloc (sizeof *g);
1146 g->stub_addr = (bfd_vma) -1;
1150 htab->stub_count[ovl] += 1;
1156 /* Support two sizes of overlay stubs, a slower more compact stub of two
1157 instructions, and a faster stub of four instructions.
1158 Soft-icache stubs are four or eight words. */
1161 ovl_stub_size (struct spu_elf_params *params)
1163 return 16 << params->ovly_flavour >> params->compact_stub;
1167 ovl_stub_size_log2 (struct spu_elf_params *params)
1169 return 4 + params->ovly_flavour - params->compact_stub;
1172 /* Two instruction overlay stubs look like:
1174 brsl $75,__ovly_load
1175 .word target_ovl_and_address
1177 ovl_and_address is a word with the overlay number in the top 14 bits
1178 and local store address in the bottom 18 bits.
1180 Four instruction overlay stubs look like:
1184 ila $79,target_address
1187 Software icache stubs are:
1191 .word lrlive_branchlocalstoreaddr;
1192 brasl $75,__icache_br_handler
1197 build_stub (struct bfd_link_info *info,
1200 enum _stub_type stub_type,
1201 struct elf_link_hash_entry *h,
1202 const Elf_Internal_Rela *irela,
1206 struct spu_link_hash_table *htab = spu_hash_table (info);
1207 unsigned int ovl, dest_ovl, set_id;
1208 struct got_entry *g, **head;
1210 bfd_vma addend, from, to, br_dest, patt;
1211 unsigned int lrlive;
1214 if (stub_type != nonovl_stub)
1215 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1218 head = &h->got.glist;
1220 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1224 addend = irela->r_addend;
1226 if (htab->params->ovly_flavour == ovly_soft_icache)
1228 g = bfd_malloc (sizeof *g);
1234 g->br_addr = (irela->r_offset
1235 + isec->output_offset
1236 + isec->output_section->vma);
1242 for (g = *head; g != NULL; g = g->next)
1243 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1248 if (g->ovl == 0 && ovl != 0)
1251 if (g->stub_addr != (bfd_vma) -1)
1255 sec = htab->stub_sec[ovl];
1256 dest += dest_sec->output_offset + dest_sec->output_section->vma;
1257 from = sec->size + sec->output_offset + sec->output_section->vma;
1258 g->stub_addr = from;
1259 to = (htab->ovly_entry[0]->root.u.def.value
1260 + htab->ovly_entry[0]->root.u.def.section->output_offset
1261 + htab->ovly_entry[0]->root.u.def.section->output_section->vma);
1263 if (((dest | to | from) & 3) != 0)
1268 dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
1270 if (htab->params->ovly_flavour == ovly_normal
1271 && !htab->params->compact_stub)
1273 bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
1274 sec->contents + sec->size);
1275 bfd_put_32 (sec->owner, LNOP,
1276 sec->contents + sec->size + 4);
1277 bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
1278 sec->contents + sec->size + 8);
1280 bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
1281 sec->contents + sec->size + 12);
1283 bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
1284 sec->contents + sec->size + 12);
1286 else if (htab->params->ovly_flavour == ovly_normal
1287 && htab->params->compact_stub)
1290 bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
1291 sec->contents + sec->size);
1293 bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1294 sec->contents + sec->size);
1295 bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
1296 sec->contents + sec->size + 4);
1298 else if (htab->params->ovly_flavour == ovly_soft_icache
1299 && htab->params->compact_stub)
1302 if (stub_type == nonovl_stub)
1304 else if (stub_type == call_ovl_stub)
1305 /* A brsl makes lr live and *(*sp+16) is live.
1306 Tail calls have the same liveness. */
1308 else if (!htab->params->lrlive_analysis)
1309 /* Assume stack frame and lr save. */
1311 else if (irela != NULL)
1313 /* Analyse branch instructions. */
1314 struct function_info *caller;
1317 caller = find_function (isec, irela->r_offset, info);
1318 if (caller->start == NULL)
1319 off = irela->r_offset;
1322 struct function_info *found = NULL;
1324 /* Find the earliest piece of this function that
1325 has frame adjusting instructions. We might
1326 see dynamic frame adjustment (eg. for alloca)
1327 in some later piece, but functions using
1328 alloca always set up a frame earlier. Frame
1329 setup instructions are always in one piece. */
1330 if (caller->lr_store != (bfd_vma) -1
1331 || caller->sp_adjust != (bfd_vma) -1)
1333 while (caller->start != NULL)
1335 caller = caller->start;
1336 if (caller->lr_store != (bfd_vma) -1
1337 || caller->sp_adjust != (bfd_vma) -1)
1345 if (off > caller->sp_adjust)
1347 if (off > caller->lr_store)
1348 /* Only *(*sp+16) is live. */
1351 /* If no lr save, then we must be in a
1352 leaf function with a frame.
1353 lr is still live. */
1356 else if (off > caller->lr_store)
1358 /* Between lr save and stack adjust. */
1360 /* This should never happen since prologues won't
1365 /* On entry to function. */
1368 if (stub_type != br000_ovl_stub
1369 && lrlive != stub_type - br000_ovl_stub)
1370 /* xgettext:c-format */
1371 info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1372 "from analysis (%u)\n"),
1373 isec, irela->r_offset, lrlive,
1374 stub_type - br000_ovl_stub);
1377 /* If given lrlive info via .brinfo, use it. */
1378 if (stub_type > br000_ovl_stub)
1379 lrlive = stub_type - br000_ovl_stub;
1382 to = (htab->ovly_entry[1]->root.u.def.value
1383 + htab->ovly_entry[1]->root.u.def.section->output_offset
1384 + htab->ovly_entry[1]->root.u.def.section->output_section->vma);
1386 /* The branch that uses this stub goes to stub_addr + 4. We'll
1387 set up an xor pattern that can be used by the icache manager
1388 to modify this branch to go directly to its destination. */
1390 br_dest = g->stub_addr;
1393 /* Except in the case of _SPUEAR_ stubs, the branch in
1394 question is the one in the stub itself. */
1395 BFD_ASSERT (stub_type == nonovl_stub);
1396 g->br_addr = g->stub_addr;
1400 set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
1401 bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
1402 sec->contents + sec->size);
1403 bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1404 sec->contents + sec->size + 4);
1405 bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
1406 sec->contents + sec->size + 8);
1407 patt = dest ^ br_dest;
1408 if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
1409 patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
1410 bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
1411 sec->contents + sec->size + 12);
1414 /* Extra space for linked list entries. */
1420 sec->size += ovl_stub_size (htab->params);
1422 if (htab->params->emit_stub_syms)
1428 len = 8 + sizeof (".ovl_call.") - 1;
1430 len += strlen (h->root.root.string);
1435 add = (int) irela->r_addend & 0xffffffff;
1438 name = bfd_malloc (len + 1);
1442 sprintf (name, "%08x.ovl_call.", g->ovl);
1444 strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
1446 sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1447 dest_sec->id & 0xffffffff,
1448 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
1450 sprintf (name + len - 9, "+%x", add);
1452 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1456 if (h->root.type == bfd_link_hash_new)
1458 h->root.type = bfd_link_hash_defined;
1459 h->root.u.def.section = sec;
1460 h->size = ovl_stub_size (htab->params);
1461 h->root.u.def.value = sec->size - h->size;
1465 h->ref_regular_nonweak = 1;
1466 h->forced_local = 1;
1474 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1478 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1480 /* Symbols starting with _SPUEAR_ need a stub because they may be
1481 invoked by the PPU. */
1482 struct bfd_link_info *info = inf;
1483 struct spu_link_hash_table *htab = spu_hash_table (info);
1486 if ((h->root.type == bfd_link_hash_defined
1487 || h->root.type == bfd_link_hash_defweak)
1489 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1490 && (sym_sec = h->root.u.def.section) != NULL
1491 && sym_sec->output_section != bfd_abs_section_ptr
1492 && spu_elf_section_data (sym_sec->output_section) != NULL
1493 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1494 || htab->params->non_overlay_stubs))
1496 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1503 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1505 /* Symbols starting with _SPUEAR_ need a stub because they may be
1506 invoked by the PPU. */
1507 struct bfd_link_info *info = inf;
1508 struct spu_link_hash_table *htab = spu_hash_table (info);
1511 if ((h->root.type == bfd_link_hash_defined
1512 || h->root.type == bfd_link_hash_defweak)
1514 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1515 && (sym_sec = h->root.u.def.section) != NULL
1516 && sym_sec->output_section != bfd_abs_section_ptr
1517 && spu_elf_section_data (sym_sec->output_section) != NULL
1518 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1519 || htab->params->non_overlay_stubs))
1521 return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1522 h->root.u.def.value, sym_sec);
1528 /* Size or build stubs. */
1531 process_stubs (struct bfd_link_info *info, bfd_boolean build)
1533 struct spu_link_hash_table *htab = spu_hash_table (info);
1536 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
1538 extern const bfd_target spu_elf32_vec;
1539 Elf_Internal_Shdr *symtab_hdr;
1541 Elf_Internal_Sym *local_syms = NULL;
1543 if (ibfd->xvec != &spu_elf32_vec)
1546 /* We'll need the symbol table in a second. */
1547 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1548 if (symtab_hdr->sh_info == 0)
1551 /* Walk over each section attached to the input bfd. */
1552 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
1554 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1556 /* If there aren't any relocs, then there's nothing more to do. */
1557 if ((isec->flags & SEC_RELOC) == 0
1558 || isec->reloc_count == 0)
1561 if (!maybe_needs_stubs (isec))
1564 /* Get the relocs. */
1565 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1567 if (internal_relocs == NULL)
1568 goto error_ret_free_local;
1570 /* Now examine each relocation. */
1571 irela = internal_relocs;
1572 irelaend = irela + isec->reloc_count;
1573 for (; irela < irelaend; irela++)
1575 enum elf_spu_reloc_type r_type;
1576 unsigned int r_indx;
1578 Elf_Internal_Sym *sym;
1579 struct elf_link_hash_entry *h;
1580 enum _stub_type stub_type;
1582 r_type = ELF32_R_TYPE (irela->r_info);
1583 r_indx = ELF32_R_SYM (irela->r_info);
1585 if (r_type >= R_SPU_max)
1587 bfd_set_error (bfd_error_bad_value);
1588 error_ret_free_internal:
1589 if (elf_section_data (isec)->relocs != internal_relocs)
1590 free (internal_relocs);
1591 error_ret_free_local:
1592 if (local_syms != NULL
1593 && (symtab_hdr->contents
1594 != (unsigned char *) local_syms))
1599 /* Determine the reloc target section. */
1600 if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
1601 goto error_ret_free_internal;
1603 stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1605 if (stub_type == no_stub)
1607 else if (stub_type == stub_error)
1608 goto error_ret_free_internal;
1610 if (htab->stub_count == NULL)
1613 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1614 htab->stub_count = bfd_zmalloc (amt);
1615 if (htab->stub_count == NULL)
1616 goto error_ret_free_internal;
1621 if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1622 goto error_ret_free_internal;
1629 dest = h->root.u.def.value;
1631 dest = sym->st_value;
1632 dest += irela->r_addend;
1633 if (!build_stub (info, ibfd, isec, stub_type, h, irela,
1635 goto error_ret_free_internal;
1639 /* We're done with the internal relocs, free them. */
1640 if (elf_section_data (isec)->relocs != internal_relocs)
1641 free (internal_relocs);
1644 if (local_syms != NULL
1645 && symtab_hdr->contents != (unsigned char *) local_syms)
1647 if (!info->keep_memory)
1650 symtab_hdr->contents = (unsigned char *) local_syms;
1657 /* Allocate space for overlay call and return stubs.
1658 Return 0 on error, 1 if no overlays, 2 otherwise. */
1661 spu_elf_size_stubs (struct bfd_link_info *info)
1663 struct spu_link_hash_table *htab;
1670 if (!process_stubs (info, FALSE))
1673 htab = spu_hash_table (info);
1674 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
1678 ibfd = info->input_bfds;
1679 if (htab->stub_count != NULL)
1681 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1682 htab->stub_sec = bfd_zmalloc (amt);
1683 if (htab->stub_sec == NULL)
1686 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1687 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1688 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1689 htab->stub_sec[0] = stub;
1691 || !bfd_set_section_alignment (ibfd, stub,
1692 ovl_stub_size_log2 (htab->params)))
1694 stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
1695 if (htab->params->ovly_flavour == ovly_soft_icache)
1696 /* Extra space for linked list entries. */
1697 stub->size += htab->stub_count[0] * 16;
1699 for (i = 0; i < htab->num_overlays; ++i)
1701 asection *osec = htab->ovl_sec[i];
1702 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1703 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1704 htab->stub_sec[ovl] = stub;
1706 || !bfd_set_section_alignment (ibfd, stub,
1707 ovl_stub_size_log2 (htab->params)))
1709 stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
1713 if (htab->params->ovly_flavour == ovly_soft_icache)
1715 /* Space for icache manager tables.
1716 a) Tag array, one quadword per cache line.
1717 b) Rewrite "to" list, one quadword per cache line.
1718 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1719 a power-of-two number of full quadwords) per cache line. */
1722 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1723 if (htab->ovtab == NULL
1724 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1727 htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
1728 << htab->num_lines_log2;
1730 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1731 htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
1732 if (htab->init == NULL
1733 || !bfd_set_section_alignment (ibfd, htab->init, 4))
1736 htab->init->size = 16;
1738 else if (htab->stub_count == NULL)
1742 /* htab->ovtab consists of two arrays.
1752 . } _ovly_buf_table[];
1755 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1756 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1757 if (htab->ovtab == NULL
1758 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1761 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1764 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1765 if (htab->toe == NULL
1766 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1768 htab->toe->size = 16;
1773 /* Called from ld to place overlay manager data sections. This is done
1774 after the overlay manager itself is loaded, mainly so that the
1775 linker's htab->init section is placed after any other .ovl.init
1779 spu_elf_place_overlay_data (struct bfd_link_info *info)
1781 struct spu_link_hash_table *htab = spu_hash_table (info);
1784 if (htab->stub_sec != NULL)
1786 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1788 for (i = 0; i < htab->num_overlays; ++i)
1790 asection *osec = htab->ovl_sec[i];
1791 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1792 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1796 if (htab->params->ovly_flavour == ovly_soft_icache)
1797 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1799 if (htab->ovtab != NULL)
1801 const char *ovout = ".data";
1802 if (htab->params->ovly_flavour == ovly_soft_icache)
1804 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1807 if (htab->toe != NULL)
1808 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1811 /* Functions to handle embedded spu_ovl.o object. */
1814 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1820 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1826 struct _ovl_stream *os;
1830 os = (struct _ovl_stream *) stream;
1831 max = (const char *) os->end - (const char *) os->start;
1833 if ((ufile_ptr) offset >= max)
1837 if (count > max - offset)
1838 count = max - offset;
1840 memcpy (buf, (const char *) os->start + offset, count);
1845 ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
1849 struct _ovl_stream *os = (struct _ovl_stream *) stream;
1851 memset (sb, 0, sizeof (*sb));
1852 sb->st_size = (const char *) os->end - (const char *) os->start;
1857 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1859 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1866 return *ovl_bfd != NULL;
1870 overlay_index (asection *sec)
1873 || sec->output_section == bfd_abs_section_ptr)
1875 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1878 /* Define an STT_OBJECT symbol. */
1880 static struct elf_link_hash_entry *
1881 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1883 struct elf_link_hash_entry *h;
1885 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1889 if (h->root.type != bfd_link_hash_defined
1892 h->root.type = bfd_link_hash_defined;
1893 h->root.u.def.section = htab->ovtab;
1894 h->type = STT_OBJECT;
1897 h->ref_regular_nonweak = 1;
1900 else if (h->root.u.def.section->owner != NULL)
1902 /* xgettext:c-format */
1903 _bfd_error_handler (_("%B is not allowed to define %s"),
1904 h->root.u.def.section->owner,
1905 h->root.root.string);
1906 bfd_set_error (bfd_error_bad_value);
1911 _bfd_error_handler (_("you are not allowed to define %s in a script"),
1912 h->root.root.string);
1913 bfd_set_error (bfd_error_bad_value);
1920 /* Fill in all stubs and the overlay tables. */
1923 spu_elf_build_stubs (struct bfd_link_info *info)
1925 struct spu_link_hash_table *htab = spu_hash_table (info);
1926 struct elf_link_hash_entry *h;
1932 if (htab->num_overlays != 0)
1934 for (i = 0; i < 2; i++)
1936 h = htab->ovly_entry[i];
1938 && (h->root.type == bfd_link_hash_defined
1939 || h->root.type == bfd_link_hash_defweak)
1942 s = h->root.u.def.section->output_section;
1943 if (spu_elf_section_data (s)->u.o.ovl_index)
1945 _bfd_error_handler (_("%s in overlay section"),
1946 h->root.root.string);
1947 bfd_set_error (bfd_error_bad_value);
1954 if (htab->stub_sec != NULL)
1956 for (i = 0; i <= htab->num_overlays; i++)
1957 if (htab->stub_sec[i]->size != 0)
1959 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1960 htab->stub_sec[i]->size);
1961 if (htab->stub_sec[i]->contents == NULL)
1963 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1964 htab->stub_sec[i]->size = 0;
1967 /* Fill in all the stubs. */
1968 process_stubs (info, TRUE);
1969 if (!htab->stub_err)
1970 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1974 _bfd_error_handler (_("overlay stub relocation overflow"));
1975 bfd_set_error (bfd_error_bad_value);
1979 for (i = 0; i <= htab->num_overlays; i++)
1981 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1983 _bfd_error_handler (_("stubs don't match calculated size"));
1984 bfd_set_error (bfd_error_bad_value);
1987 htab->stub_sec[i]->rawsize = 0;
1991 if (htab->ovtab == NULL || htab->ovtab->size == 0)
1994 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1995 if (htab->ovtab->contents == NULL)
1998 p = htab->ovtab->contents;
1999 if (htab->params->ovly_flavour == ovly_soft_icache)
2003 h = define_ovtab_symbol (htab, "__icache_tag_array");
2006 h->root.u.def.value = 0;
2007 h->size = 16 << htab->num_lines_log2;
2010 h = define_ovtab_symbol (htab, "__icache_tag_array_size");
2013 h->root.u.def.value = 16 << htab->num_lines_log2;
2014 h->root.u.def.section = bfd_abs_section_ptr;
2016 h = define_ovtab_symbol (htab, "__icache_rewrite_to");
2019 h->root.u.def.value = off;
2020 h->size = 16 << htab->num_lines_log2;
2023 h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
2026 h->root.u.def.value = 16 << htab->num_lines_log2;
2027 h->root.u.def.section = bfd_abs_section_ptr;
2029 h = define_ovtab_symbol (htab, "__icache_rewrite_from");
2032 h->root.u.def.value = off;
2033 h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
2036 h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
2039 h->root.u.def.value = 16 << (htab->fromelem_size_log2
2040 + htab->num_lines_log2);
2041 h->root.u.def.section = bfd_abs_section_ptr;
2043 h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2046 h->root.u.def.value = htab->fromelem_size_log2;
2047 h->root.u.def.section = bfd_abs_section_ptr;
2049 h = define_ovtab_symbol (htab, "__icache_base");
2052 h->root.u.def.value = htab->ovl_sec[0]->vma;
2053 h->root.u.def.section = bfd_abs_section_ptr;
2054 h->size = htab->num_buf << htab->line_size_log2;
2056 h = define_ovtab_symbol (htab, "__icache_linesize");
2059 h->root.u.def.value = 1 << htab->line_size_log2;
2060 h->root.u.def.section = bfd_abs_section_ptr;
2062 h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2065 h->root.u.def.value = htab->line_size_log2;
2066 h->root.u.def.section = bfd_abs_section_ptr;
2068 h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2071 h->root.u.def.value = -htab->line_size_log2;
2072 h->root.u.def.section = bfd_abs_section_ptr;
2074 h = define_ovtab_symbol (htab, "__icache_cachesize");
2077 h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2078 h->root.u.def.section = bfd_abs_section_ptr;
2080 h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2083 h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2084 h->root.u.def.section = bfd_abs_section_ptr;
2086 h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2089 h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2090 h->root.u.def.section = bfd_abs_section_ptr;
2092 if (htab->init != NULL && htab->init->size != 0)
2094 htab->init->contents = bfd_zalloc (htab->init->owner,
2096 if (htab->init->contents == NULL)
2099 h = define_ovtab_symbol (htab, "__icache_fileoff");
2102 h->root.u.def.value = 0;
2103 h->root.u.def.section = htab->init;
2109 /* Write out _ovly_table. */
2110 /* set low bit of .size to mark non-overlay area as present. */
2112 obfd = htab->ovtab->output_section->owner;
2113 for (s = obfd->sections; s != NULL; s = s->next)
2115 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2119 unsigned long off = ovl_index * 16;
2120 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2122 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2123 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2125 /* file_off written later in spu_elf_modify_program_headers. */
2126 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2130 h = define_ovtab_symbol (htab, "_ovly_table");
2133 h->root.u.def.value = 16;
2134 h->size = htab->num_overlays * 16;
2136 h = define_ovtab_symbol (htab, "_ovly_table_end");
2139 h->root.u.def.value = htab->num_overlays * 16 + 16;
2142 h = define_ovtab_symbol (htab, "_ovly_buf_table");
2145 h->root.u.def.value = htab->num_overlays * 16 + 16;
2146 h->size = htab->num_buf * 4;
2148 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2151 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2155 h = define_ovtab_symbol (htab, "_EAR_");
2158 h->root.u.def.section = htab->toe;
2159 h->root.u.def.value = 0;
2165 /* Check that all loadable section VMAs lie in the range
2166 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2169 spu_elf_check_vma (struct bfd_link_info *info)
2171 struct elf_segment_map *m;
2173 struct spu_link_hash_table *htab = spu_hash_table (info);
2174 bfd *abfd = info->output_bfd;
2175 bfd_vma hi = htab->params->local_store_hi;
2176 bfd_vma lo = htab->params->local_store_lo;
2178 htab->local_store = hi + 1 - lo;
2180 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2181 if (m->p_type == PT_LOAD)
2182 for (i = 0; i < m->count; i++)
2183 if (m->sections[i]->size != 0
2184 && (m->sections[i]->vma < lo
2185 || m->sections[i]->vma > hi
2186 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2187 return m->sections[i];
2192 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2193 Search for stack adjusting insns, and return the sp delta.
2194 If a store of lr is found save the instruction offset to *LR_STORE.
2195 If a stack adjusting instruction is found, save that offset to
2199 find_function_stack_adjust (asection *sec,
2206 memset (reg, 0, sizeof (reg));
2207 for ( ; offset + 4 <= sec->size; offset += 4)
2209 unsigned char buf[4];
2213 /* Assume no relocs on stack adjusing insns. */
2214 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2218 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2220 if (buf[0] == 0x24 /* stqd */)
2222 if (rt == 0 /* lr */ && ra == 1 /* sp */)
2227 /* Partly decoded immediate field. */
2228 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2230 if (buf[0] == 0x1c /* ai */)
2233 imm = (imm ^ 0x200) - 0x200;
2234 reg[rt] = reg[ra] + imm;
2236 if (rt == 1 /* sp */)
2240 *sp_adjust = offset;
2244 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2246 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2248 reg[rt] = reg[ra] + reg[rb];
2253 *sp_adjust = offset;
2257 else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2259 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2261 reg[rt] = reg[rb] - reg[ra];
2266 *sp_adjust = offset;
2270 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2272 if (buf[0] >= 0x42 /* ila */)
2273 imm |= (buf[0] & 1) << 17;
2278 if (buf[0] == 0x40 /* il */)
2280 if ((buf[1] & 0x80) == 0)
2282 imm = (imm ^ 0x8000) - 0x8000;
2284 else if ((buf[1] & 0x80) == 0 /* ilhu */)
2290 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2292 reg[rt] |= imm & 0xffff;
2295 else if (buf[0] == 0x04 /* ori */)
2298 imm = (imm ^ 0x200) - 0x200;
2299 reg[rt] = reg[ra] | imm;
2302 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
2304 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
2305 | ((imm & 0x4000) ? 0x00ff0000 : 0)
2306 | ((imm & 0x2000) ? 0x0000ff00 : 0)
2307 | ((imm & 0x1000) ? 0x000000ff : 0));
2310 else if (buf[0] == 0x16 /* andbi */)
2316 reg[rt] = reg[ra] & imm;
2319 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2321 /* Used in pic reg load. Say rt is trashed. Won't be used
2322 in stack adjust, but we need to continue past this branch. */
2326 else if (is_branch (buf) || is_indirect_branch (buf))
2327 /* If we hit a branch then we must be out of the prologue. */
2334 /* qsort predicate to sort symbols by section and value. */
2336 static Elf_Internal_Sym *sort_syms_syms;
2337 static asection **sort_syms_psecs;
2340 sort_syms (const void *a, const void *b)
2342 Elf_Internal_Sym *const *s1 = a;
2343 Elf_Internal_Sym *const *s2 = b;
2344 asection *sec1,*sec2;
2345 bfd_signed_vma delta;
2347 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2348 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2351 return sec1->index - sec2->index;
2353 delta = (*s1)->st_value - (*s2)->st_value;
2355 return delta < 0 ? -1 : 1;
2357 delta = (*s2)->st_size - (*s1)->st_size;
2359 return delta < 0 ? -1 : 1;
2361 return *s1 < *s2 ? -1 : 1;
2364 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2365 entries for section SEC. */
2367 static struct spu_elf_stack_info *
2368 alloc_stack_info (asection *sec, int max_fun)
2370 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2373 amt = sizeof (struct spu_elf_stack_info);
2374 amt += (max_fun - 1) * sizeof (struct function_info);
2375 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2376 if (sec_data->u.i.stack_info != NULL)
2377 sec_data->u.i.stack_info->max_fun = max_fun;
2378 return sec_data->u.i.stack_info;
2381 /* Add a new struct function_info describing a (part of a) function
2382 starting at SYM_H. Keep the array sorted by address. */
2384 static struct function_info *
2385 maybe_insert_function (asection *sec,
2388 bfd_boolean is_func)
2390 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2391 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2397 sinfo = alloc_stack_info (sec, 20);
2404 Elf_Internal_Sym *sym = sym_h;
2405 off = sym->st_value;
2406 size = sym->st_size;
2410 struct elf_link_hash_entry *h = sym_h;
2411 off = h->root.u.def.value;
2415 for (i = sinfo->num_fun; --i >= 0; )
2416 if (sinfo->fun[i].lo <= off)
2421 /* Don't add another entry for an alias, but do update some
2423 if (sinfo->fun[i].lo == off)
2425 /* Prefer globals over local syms. */
2426 if (global && !sinfo->fun[i].global)
2428 sinfo->fun[i].global = TRUE;
2429 sinfo->fun[i].u.h = sym_h;
2432 sinfo->fun[i].is_func = TRUE;
2433 return &sinfo->fun[i];
2435 /* Ignore a zero-size symbol inside an existing function. */
2436 else if (sinfo->fun[i].hi > off && size == 0)
2437 return &sinfo->fun[i];
2440 if (sinfo->num_fun >= sinfo->max_fun)
2442 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
2443 bfd_size_type old = amt;
2445 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
2446 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
2447 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
2448 sinfo = bfd_realloc (sinfo, amt);
2451 memset ((char *) sinfo + old, 0, amt - old);
2452 sec_data->u.i.stack_info = sinfo;
2455 if (++i < sinfo->num_fun)
2456 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
2457 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
2458 sinfo->fun[i].is_func = is_func;
2459 sinfo->fun[i].global = global;
2460 sinfo->fun[i].sec = sec;
2462 sinfo->fun[i].u.h = sym_h;
2464 sinfo->fun[i].u.sym = sym_h;
2465 sinfo->fun[i].lo = off;
2466 sinfo->fun[i].hi = off + size;
2467 sinfo->fun[i].lr_store = -1;
2468 sinfo->fun[i].sp_adjust = -1;
2469 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
2470 &sinfo->fun[i].lr_store,
2471 &sinfo->fun[i].sp_adjust);
2472 sinfo->num_fun += 1;
2473 return &sinfo->fun[i];
2476 /* Return the name of FUN. */
2479 func_name (struct function_info *fun)
2483 Elf_Internal_Shdr *symtab_hdr;
2485 while (fun->start != NULL)
2489 return fun->u.h->root.root.string;
2492 if (fun->u.sym->st_name == 0)
2494 size_t len = strlen (sec->name);
2495 char *name = bfd_malloc (len + 10);
2498 sprintf (name, "%s+%lx", sec->name,
2499 (unsigned long) fun->u.sym->st_value & 0xffffffff);
2503 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2504 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2507 /* Read the instruction at OFF in SEC. Return true iff the instruction
2508 is a nop, lnop, or stop 0 (all zero insn). */
2511 is_nop (asection *sec, bfd_vma off)
2513 unsigned char insn[4];
2515 if (off + 4 > sec->size
2516 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2518 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2520 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2525 /* Extend the range of FUN to cover nop padding up to LIMIT.
2526 Return TRUE iff some instruction other than a NOP was found. */
2529 insns_at_end (struct function_info *fun, bfd_vma limit)
2531 bfd_vma off = (fun->hi + 3) & -4;
2533 while (off < limit && is_nop (fun->sec, off))
2544 /* Check and fix overlapping function ranges. Return TRUE iff there
2545 are gaps in the current info we have about functions in SEC. */
2548 check_function_ranges (asection *sec, struct bfd_link_info *info)
2550 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2551 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2553 bfd_boolean gaps = FALSE;
2558 for (i = 1; i < sinfo->num_fun; i++)
2559 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2561 /* Fix overlapping symbols. */
2562 const char *f1 = func_name (&sinfo->fun[i - 1]);
2563 const char *f2 = func_name (&sinfo->fun[i]);
2565 /* xgettext:c-format */
2566 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2567 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2569 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2572 if (sinfo->num_fun == 0)
2576 if (sinfo->fun[0].lo != 0)
2578 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2580 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2582 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2583 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2585 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2591 /* Search current function info for a function that contains address
2592 OFFSET in section SEC. */
2594 static struct function_info *
2595 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2597 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2598 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2602 hi = sinfo->num_fun;
2605 mid = (lo + hi) / 2;
2606 if (offset < sinfo->fun[mid].lo)
2608 else if (offset >= sinfo->fun[mid].hi)
2611 return &sinfo->fun[mid];
2613 /* xgettext:c-format */
2614 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2616 bfd_set_error (bfd_error_bad_value);
2620 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2621 if CALLEE was new. If this function return FALSE, CALLEE should
2625 insert_callee (struct function_info *caller, struct call_info *callee)
2627 struct call_info **pp, *p;
2629 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2630 if (p->fun == callee->fun)
2632 /* Tail calls use less stack than normal calls. Retain entry
2633 for normal call over one for tail call. */
2634 p->is_tail &= callee->is_tail;
2637 p->fun->start = NULL;
2638 p->fun->is_func = TRUE;
2640 p->count += callee->count;
2641 /* Reorder list so most recent call is first. */
2643 p->next = caller->call_list;
2644 caller->call_list = p;
2647 callee->next = caller->call_list;
2648 caller->call_list = callee;
2652 /* Copy CALL and insert the copy into CALLER. */
2655 copy_callee (struct function_info *caller, const struct call_info *call)
2657 struct call_info *callee;
2658 callee = bfd_malloc (sizeof (*callee));
2662 if (!insert_callee (caller, callee))
2667 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2668 overlay stub sections. */
2671 interesting_section (asection *s)
2673 return (s->output_section != bfd_abs_section_ptr
2674 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2675 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2679 /* Rummage through the relocs for SEC, looking for function calls.
2680 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2681 mark destination symbols on calls as being functions. Also
2682 look at branches, which may be tail calls or go to hot/cold
2683 section part of same function. */
2686 mark_functions_via_relocs (asection *sec,
2687 struct bfd_link_info *info,
2690 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2691 Elf_Internal_Shdr *symtab_hdr;
2693 unsigned int priority = 0;
2694 static bfd_boolean warned;
2696 if (!interesting_section (sec)
2697 || sec->reloc_count == 0)
2700 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2702 if (internal_relocs == NULL)
2705 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2706 psyms = &symtab_hdr->contents;
2707 irela = internal_relocs;
2708 irelaend = irela + sec->reloc_count;
2709 for (; irela < irelaend; irela++)
2711 enum elf_spu_reloc_type r_type;
2712 unsigned int r_indx;
2714 Elf_Internal_Sym *sym;
2715 struct elf_link_hash_entry *h;
2717 bfd_boolean nonbranch, is_call;
2718 struct function_info *caller;
2719 struct call_info *callee;
2721 r_type = ELF32_R_TYPE (irela->r_info);
2722 nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
2724 r_indx = ELF32_R_SYM (irela->r_info);
2725 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2729 || sym_sec->output_section == bfd_abs_section_ptr)
2735 unsigned char insn[4];
2737 if (!bfd_get_section_contents (sec->owner, sec, insn,
2738 irela->r_offset, 4))
2740 if (is_branch (insn))
2742 is_call = (insn[0] & 0xfd) == 0x31;
2743 priority = insn[1] & 0x0f;
2745 priority |= insn[2];
2747 priority |= insn[3];
2749 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2750 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2753 info->callbacks->einfo
2754 /* xgettext:c-format */
2755 (_("%B(%A+0x%v): call to non-code section"
2756 " %B(%A), analysis incomplete\n"),
2757 sec->owner, sec, irela->r_offset,
2758 sym_sec->owner, sym_sec);
2773 /* For --auto-overlay, count possible stubs we need for
2774 function pointer references. */
2775 unsigned int sym_type;
2779 sym_type = ELF_ST_TYPE (sym->st_info);
2780 if (sym_type == STT_FUNC)
2782 if (call_tree && spu_hash_table (info)->params->auto_overlay)
2783 spu_hash_table (info)->non_ovly_stub += 1;
2784 /* If the symbol type is STT_FUNC then this must be a
2785 function pointer initialisation. */
2788 /* Ignore data references. */
2789 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2790 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2792 /* Otherwise we probably have a jump table reloc for
2793 a switch statement or some other reference to a
2798 val = h->root.u.def.value;
2800 val = sym->st_value;
2801 val += irela->r_addend;
2805 struct function_info *fun;
2807 if (irela->r_addend != 0)
2809 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2812 fake->st_value = val;
2814 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2818 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2820 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2823 if (irela->r_addend != 0
2824 && fun->u.sym != sym)
2829 caller = find_function (sec, irela->r_offset, info);
2832 callee = bfd_malloc (sizeof *callee);
2836 callee->fun = find_function (sym_sec, val, info);
2837 if (callee->fun == NULL)
2839 callee->is_tail = !is_call;
2840 callee->is_pasted = FALSE;
2841 callee->broken_cycle = FALSE;
2842 callee->priority = priority;
2843 callee->count = nonbranch? 0 : 1;
2844 if (callee->fun->last_caller != sec)
2846 callee->fun->last_caller = sec;
2847 callee->fun->call_count += 1;
2849 if (!insert_callee (caller, callee))
2852 && !callee->fun->is_func
2853 && callee->fun->stack == 0)
2855 /* This is either a tail call or a branch from one part of
2856 the function to another, ie. hot/cold section. If the
2857 destination has been called by some other function then
2858 it is a separate function. We also assume that functions
2859 are not split across input files. */
2860 if (sec->owner != sym_sec->owner)
2862 callee->fun->start = NULL;
2863 callee->fun->is_func = TRUE;
2865 else if (callee->fun->start == NULL)
2867 struct function_info *caller_start = caller;
2868 while (caller_start->start)
2869 caller_start = caller_start->start;
2871 if (caller_start != callee->fun)
2872 callee->fun->start = caller_start;
2876 struct function_info *callee_start;
2877 struct function_info *caller_start;
2878 callee_start = callee->fun;
2879 while (callee_start->start)
2880 callee_start = callee_start->start;
2881 caller_start = caller;
2882 while (caller_start->start)
2883 caller_start = caller_start->start;
2884 if (caller_start != callee_start)
2886 callee->fun->start = NULL;
2887 callee->fun->is_func = TRUE;
2896 /* Handle something like .init or .fini, which has a piece of a function.
2897 These sections are pasted together to form a single function. */
2900 pasted_function (asection *sec)
2902 struct bfd_link_order *l;
2903 struct _spu_elf_section_data *sec_data;
2904 struct spu_elf_stack_info *sinfo;
2905 Elf_Internal_Sym *fake;
2906 struct function_info *fun, *fun_start;
2908 fake = bfd_zmalloc (sizeof (*fake));
2912 fake->st_size = sec->size;
2914 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2915 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2919 /* Find a function immediately preceding this section. */
2921 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2923 if (l->u.indirect.section == sec)
2925 if (fun_start != NULL)
2927 struct call_info *callee = bfd_malloc (sizeof *callee);
2931 fun->start = fun_start;
2933 callee->is_tail = TRUE;
2934 callee->is_pasted = TRUE;
2935 callee->broken_cycle = FALSE;
2936 callee->priority = 0;
2938 if (!insert_callee (fun_start, callee))
2944 if (l->type == bfd_indirect_link_order
2945 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2946 && (sinfo = sec_data->u.i.stack_info) != NULL
2947 && sinfo->num_fun != 0)
2948 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2951 /* Don't return an error if we did not find a function preceding this
2952 section. The section may have incorrect flags. */
2956 /* Map address ranges in code sections to functions. */
2959 discover_functions (struct bfd_link_info *info)
2963 Elf_Internal_Sym ***psym_arr;
2964 asection ***sec_arr;
2965 bfd_boolean gaps = FALSE;
2968 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
2971 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2972 if (psym_arr == NULL)
2974 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2975 if (sec_arr == NULL)
2978 for (ibfd = info->input_bfds, bfd_idx = 0;
2980 ibfd = ibfd->link.next, bfd_idx++)
2982 extern const bfd_target spu_elf32_vec;
2983 Elf_Internal_Shdr *symtab_hdr;
2986 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2987 asection **psecs, **p;
2989 if (ibfd->xvec != &spu_elf32_vec)
2992 /* Read all the symbols. */
2993 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2994 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2998 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2999 if (interesting_section (sec))
3007 if (symtab_hdr->contents != NULL)
3009 /* Don't use cached symbols since the generic ELF linker
3010 code only reads local symbols, and we need globals too. */
3011 free (symtab_hdr->contents);
3012 symtab_hdr->contents = NULL;
3014 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
3016 symtab_hdr->contents = (void *) syms;
3020 /* Select defined function symbols that are going to be output. */
3021 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
3024 psym_arr[bfd_idx] = psyms;
3025 psecs = bfd_malloc (symcount * sizeof (*psecs));
3028 sec_arr[bfd_idx] = psecs;
3029 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
3030 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
3031 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3035 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3036 if (s != NULL && interesting_section (s))
3039 symcount = psy - psyms;
3042 /* Sort them by section and offset within section. */
3043 sort_syms_syms = syms;
3044 sort_syms_psecs = psecs;
3045 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3047 /* Now inspect the function symbols. */
3048 for (psy = psyms; psy < psyms + symcount; )
3050 asection *s = psecs[*psy - syms];
3051 Elf_Internal_Sym **psy2;
3053 for (psy2 = psy; ++psy2 < psyms + symcount; )
3054 if (psecs[*psy2 - syms] != s)
3057 if (!alloc_stack_info (s, psy2 - psy))
3062 /* First install info about properly typed and sized functions.
3063 In an ideal world this will cover all code sections, except
3064 when partitioning functions into hot and cold sections,
3065 and the horrible pasted together .init and .fini functions. */
3066 for (psy = psyms; psy < psyms + symcount; ++psy)
3069 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3071 asection *s = psecs[sy - syms];
3072 if (!maybe_insert_function (s, sy, FALSE, TRUE))
3077 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3078 if (interesting_section (sec))
3079 gaps |= check_function_ranges (sec, info);
3084 /* See if we can discover more function symbols by looking at
3086 for (ibfd = info->input_bfds, bfd_idx = 0;
3088 ibfd = ibfd->link.next, bfd_idx++)
3092 if (psym_arr[bfd_idx] == NULL)
3095 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3096 if (!mark_functions_via_relocs (sec, info, FALSE))
3100 for (ibfd = info->input_bfds, bfd_idx = 0;
3102 ibfd = ibfd->link.next, bfd_idx++)
3104 Elf_Internal_Shdr *symtab_hdr;
3106 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3109 if ((psyms = psym_arr[bfd_idx]) == NULL)
3112 psecs = sec_arr[bfd_idx];
3114 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3115 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3118 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3119 if (interesting_section (sec))
3120 gaps |= check_function_ranges (sec, info);
3124 /* Finally, install all globals. */
3125 for (psy = psyms; (sy = *psy) != NULL; ++psy)
3129 s = psecs[sy - syms];
3131 /* Global syms might be improperly typed functions. */
3132 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3133 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3135 if (!maybe_insert_function (s, sy, FALSE, FALSE))
3141 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3143 extern const bfd_target spu_elf32_vec;
3146 if (ibfd->xvec != &spu_elf32_vec)
3149 /* Some of the symbols we've installed as marking the
3150 beginning of functions may have a size of zero. Extend
3151 the range of such functions to the beginning of the
3152 next symbol of interest. */
3153 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3154 if (interesting_section (sec))
3156 struct _spu_elf_section_data *sec_data;
3157 struct spu_elf_stack_info *sinfo;
3159 sec_data = spu_elf_section_data (sec);
3160 sinfo = sec_data->u.i.stack_info;
3161 if (sinfo != NULL && sinfo->num_fun != 0)
3164 bfd_vma hi = sec->size;
3166 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3168 sinfo->fun[fun_idx].hi = hi;
3169 hi = sinfo->fun[fun_idx].lo;
3172 sinfo->fun[0].lo = 0;
3174 /* No symbols in this section. Must be .init or .fini
3175 or something similar. */
3176 else if (!pasted_function (sec))
3182 for (ibfd = info->input_bfds, bfd_idx = 0;
3184 ibfd = ibfd->link.next, bfd_idx++)
3186 if (psym_arr[bfd_idx] == NULL)
3189 free (psym_arr[bfd_idx]);
3190 free (sec_arr[bfd_idx]);
3199 /* Iterate over all function_info we have collected, calling DOIT on
3200 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3204 for_each_node (bfd_boolean (*doit) (struct function_info *,
3205 struct bfd_link_info *,
3207 struct bfd_link_info *info,
3213 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3215 extern const bfd_target spu_elf32_vec;
3218 if (ibfd->xvec != &spu_elf32_vec)
3221 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3223 struct _spu_elf_section_data *sec_data;
3224 struct spu_elf_stack_info *sinfo;
3226 if ((sec_data = spu_elf_section_data (sec)) != NULL
3227 && (sinfo = sec_data->u.i.stack_info) != NULL)
3230 for (i = 0; i < sinfo->num_fun; ++i)
3231 if (!root_only || !sinfo->fun[i].non_root)
3232 if (!doit (&sinfo->fun[i], info, param))
3240 /* Transfer call info attached to struct function_info entries for
3241 all of a given function's sections to the first entry. */
3244 transfer_calls (struct function_info *fun,
3245 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3246 void *param ATTRIBUTE_UNUSED)
3248 struct function_info *start = fun->start;
3252 struct call_info *call, *call_next;
3254 while (start->start != NULL)
3255 start = start->start;
3256 for (call = fun->call_list; call != NULL; call = call_next)
3258 call_next = call->next;
3259 if (!insert_callee (start, call))
3262 fun->call_list = NULL;
3267 /* Mark nodes in the call graph that are called by some other node. */
3270 mark_non_root (struct function_info *fun,
3271 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3272 void *param ATTRIBUTE_UNUSED)
3274 struct call_info *call;
3279 for (call = fun->call_list; call; call = call->next)
3281 call->fun->non_root = TRUE;
3282 mark_non_root (call->fun, 0, 0);
3287 /* Remove cycles from the call graph. Set depth of nodes. */
3290 remove_cycles (struct function_info *fun,
3291 struct bfd_link_info *info,
3294 struct call_info **callp, *call;
3295 unsigned int depth = *(unsigned int *) param;
3296 unsigned int max_depth = depth;
3300 fun->marking = TRUE;
3302 callp = &fun->call_list;
3303 while ((call = *callp) != NULL)
3305 call->max_depth = depth + !call->is_pasted;
3306 if (!call->fun->visit2)
3308 if (!remove_cycles (call->fun, info, &call->max_depth))
3310 if (max_depth < call->max_depth)
3311 max_depth = call->max_depth;
3313 else if (call->fun->marking)
3315 struct spu_link_hash_table *htab = spu_hash_table (info);
3317 if (!htab->params->auto_overlay
3318 && htab->params->stack_analysis)
3320 const char *f1 = func_name (fun);
3321 const char *f2 = func_name (call->fun);
3323 /* xgettext:c-format */
3324 info->callbacks->info (_("Stack analysis will ignore the call "
3329 call->broken_cycle = TRUE;
3331 callp = &call->next;
3333 fun->marking = FALSE;
3334 *(unsigned int *) param = max_depth;
3338 /* Check that we actually visited all nodes in remove_cycles. If we
3339 didn't, then there is some cycle in the call graph not attached to
3340 any root node. Arbitrarily choose a node in the cycle as a new
3341 root and break the cycle. */
3344 mark_detached_root (struct function_info *fun,
3345 struct bfd_link_info *info,
3350 fun->non_root = FALSE;
3351 *(unsigned int *) param = 0;
3352 return remove_cycles (fun, info, param);
3355 /* Populate call_list for each function. */
3358 build_call_tree (struct bfd_link_info *info)
3363 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3365 extern const bfd_target spu_elf32_vec;
3368 if (ibfd->xvec != &spu_elf32_vec)
3371 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3372 if (!mark_functions_via_relocs (sec, info, TRUE))
3376 /* Transfer call info from hot/cold section part of function
3378 if (!spu_hash_table (info)->params->auto_overlay
3379 && !for_each_node (transfer_calls, info, 0, FALSE))
3382 /* Find the call graph root(s). */
3383 if (!for_each_node (mark_non_root, info, 0, FALSE))
3386 /* Remove cycles from the call graph. We start from the root node(s)
3387 so that we break cycles in a reasonable place. */
3389 if (!for_each_node (remove_cycles, info, &depth, TRUE))
3392 return for_each_node (mark_detached_root, info, &depth, FALSE);
3395 /* qsort predicate to sort calls by priority, max_depth then count. */
3398 sort_calls (const void *a, const void *b)
3400 struct call_info *const *c1 = a;
3401 struct call_info *const *c2 = b;
3404 delta = (*c2)->priority - (*c1)->priority;
3408 delta = (*c2)->max_depth - (*c1)->max_depth;
3412 delta = (*c2)->count - (*c1)->count;
3416 return (char *) c1 - (char *) c2;
3420 unsigned int max_overlay_size;
3423 /* Set linker_mark and gc_mark on any sections that we will put in
3424 overlays. These flags are used by the generic ELF linker, but we
3425 won't be continuing on to bfd_elf_final_link so it is OK to use
3426 them. linker_mark is clear before we get here. Set segment_mark
3427 on sections that are part of a pasted function (excluding the last
3430 Set up function rodata section if --overlay-rodata. We don't
3431 currently include merged string constant rodata sections since
3433 Sort the call graph so that the deepest nodes will be visited
3437 mark_overlay_section (struct function_info *fun,
3438 struct bfd_link_info *info,
3441 struct call_info *call;
3443 struct _mos_param *mos_param = param;
3444 struct spu_link_hash_table *htab = spu_hash_table (info);
3450 if (!fun->sec->linker_mark
3451 && (htab->params->ovly_flavour != ovly_soft_icache
3452 || htab->params->non_ia_text
3453 || strncmp (fun->sec->name, ".text.ia.", 9) == 0
3454 || strcmp (fun->sec->name, ".init") == 0
3455 || strcmp (fun->sec->name, ".fini") == 0))
3459 fun->sec->linker_mark = 1;
3460 fun->sec->gc_mark = 1;
3461 fun->sec->segment_mark = 0;
3462 /* Ensure SEC_CODE is set on this text section (it ought to
3463 be!), and SEC_CODE is clear on rodata sections. We use
3464 this flag to differentiate the two overlay section types. */
3465 fun->sec->flags |= SEC_CODE;
3467 size = fun->sec->size;
3468 if (htab->params->auto_overlay & OVERLAY_RODATA)
3472 /* Find the rodata section corresponding to this function's
3474 if (strcmp (fun->sec->name, ".text") == 0)
3476 name = bfd_malloc (sizeof (".rodata"));
3479 memcpy (name, ".rodata", sizeof (".rodata"));
3481 else if (strncmp (fun->sec->name, ".text.", 6) == 0)
3483 size_t len = strlen (fun->sec->name);
3484 name = bfd_malloc (len + 3);
3487 memcpy (name, ".rodata", sizeof (".rodata"));
3488 memcpy (name + 7, fun->sec->name + 5, len - 4);
3490 else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
3492 size_t len = strlen (fun->sec->name) + 1;
3493 name = bfd_malloc (len);
3496 memcpy (name, fun->sec->name, len);
3502 asection *rodata = NULL;
3503 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3504 if (group_sec == NULL)
3505 rodata = bfd_get_section_by_name (fun->sec->owner, name);
3507 while (group_sec != NULL && group_sec != fun->sec)
3509 if (strcmp (group_sec->name, name) == 0)
3514 group_sec = elf_section_data (group_sec)->next_in_group;
3516 fun->rodata = rodata;
3519 size += fun->rodata->size;
3520 if (htab->params->line_size != 0
3521 && size > htab->params->line_size)
3523 size -= fun->rodata->size;
3528 fun->rodata->linker_mark = 1;
3529 fun->rodata->gc_mark = 1;
3530 fun->rodata->flags &= ~SEC_CODE;
3536 if (mos_param->max_overlay_size < size)
3537 mos_param->max_overlay_size = size;
3540 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3545 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3549 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3550 calls[count++] = call;
3552 qsort (calls, count, sizeof (*calls), sort_calls);
3554 fun->call_list = NULL;
3558 calls[count]->next = fun->call_list;
3559 fun->call_list = calls[count];
3564 for (call = fun->call_list; call != NULL; call = call->next)
3566 if (call->is_pasted)
3568 /* There can only be one is_pasted call per function_info. */
3569 BFD_ASSERT (!fun->sec->segment_mark);
3570 fun->sec->segment_mark = 1;
3572 if (!call->broken_cycle
3573 && !mark_overlay_section (call->fun, info, param))
3577 /* Don't put entry code into an overlay. The overlay manager needs
3578 a stack! Also, don't mark .ovl.init as an overlay. */
3579 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3580 == info->output_bfd->start_address
3581 || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
3583 fun->sec->linker_mark = 0;
3584 if (fun->rodata != NULL)
3585 fun->rodata->linker_mark = 0;
3590 /* If non-zero then unmark functions called from those within sections
3591 that we need to unmark. Unfortunately this isn't reliable since the
3592 call graph cannot know the destination of function pointer calls. */
3593 #define RECURSE_UNMARK 0
3596 asection *exclude_input_section;
3597 asection *exclude_output_section;
3598 unsigned long clearing;
3601 /* Undo some of mark_overlay_section's work. */
3604 unmark_overlay_section (struct function_info *fun,
3605 struct bfd_link_info *info,
3608 struct call_info *call;
3609 struct _uos_param *uos_param = param;
3610 unsigned int excluded = 0;
3618 if (fun->sec == uos_param->exclude_input_section
3619 || fun->sec->output_section == uos_param->exclude_output_section)
3623 uos_param->clearing += excluded;
3625 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3627 fun->sec->linker_mark = 0;
3629 fun->rodata->linker_mark = 0;
3632 for (call = fun->call_list; call != NULL; call = call->next)
3633 if (!call->broken_cycle
3634 && !unmark_overlay_section (call->fun, info, param))
3638 uos_param->clearing -= excluded;
3643 unsigned int lib_size;
3644 asection **lib_sections;
3647 /* Add sections we have marked as belonging to overlays to an array
3648 for consideration as non-overlay sections. The array consist of
3649 pairs of sections, (text,rodata), for functions in the call graph. */
3652 collect_lib_sections (struct function_info *fun,
3653 struct bfd_link_info *info,
3656 struct _cl_param *lib_param = param;
3657 struct call_info *call;
3664 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3667 size = fun->sec->size;
3669 size += fun->rodata->size;
3671 if (size <= lib_param->lib_size)
3673 *lib_param->lib_sections++ = fun->sec;
3674 fun->sec->gc_mark = 0;
3675 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3677 *lib_param->lib_sections++ = fun->rodata;
3678 fun->rodata->gc_mark = 0;
3681 *lib_param->lib_sections++ = NULL;
3684 for (call = fun->call_list; call != NULL; call = call->next)
3685 if (!call->broken_cycle)
3686 collect_lib_sections (call->fun, info, param);
3691 /* qsort predicate to sort sections by call count. */
3694 sort_lib (const void *a, const void *b)
3696 asection *const *s1 = a;
3697 asection *const *s2 = b;
3698 struct _spu_elf_section_data *sec_data;
3699 struct spu_elf_stack_info *sinfo;
3703 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3704 && (sinfo = sec_data->u.i.stack_info) != NULL)
3707 for (i = 0; i < sinfo->num_fun; ++i)
3708 delta -= sinfo->fun[i].call_count;
3711 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3712 && (sinfo = sec_data->u.i.stack_info) != NULL)
3715 for (i = 0; i < sinfo->num_fun; ++i)
3716 delta += sinfo->fun[i].call_count;
3725 /* Remove some sections from those marked to be in overlays. Choose
3726 those that are called from many places, likely library functions. */
3729 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3732 asection **lib_sections;
3733 unsigned int i, lib_count;
3734 struct _cl_param collect_lib_param;
3735 struct function_info dummy_caller;
3736 struct spu_link_hash_table *htab;
3738 memset (&dummy_caller, 0, sizeof (dummy_caller));
3740 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3742 extern const bfd_target spu_elf32_vec;
3745 if (ibfd->xvec != &spu_elf32_vec)
3748 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3749 if (sec->linker_mark
3750 && sec->size < lib_size
3751 && (sec->flags & SEC_CODE) != 0)
3754 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3755 if (lib_sections == NULL)
3756 return (unsigned int) -1;
3757 collect_lib_param.lib_size = lib_size;
3758 collect_lib_param.lib_sections = lib_sections;
3759 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3761 return (unsigned int) -1;
3762 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3764 /* Sort sections so that those with the most calls are first. */
3766 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3768 htab = spu_hash_table (info);
3769 for (i = 0; i < lib_count; i++)
3771 unsigned int tmp, stub_size;
3773 struct _spu_elf_section_data *sec_data;
3774 struct spu_elf_stack_info *sinfo;
3776 sec = lib_sections[2 * i];
3777 /* If this section is OK, its size must be less than lib_size. */
3779 /* If it has a rodata section, then add that too. */
3780 if (lib_sections[2 * i + 1])
3781 tmp += lib_sections[2 * i + 1]->size;
3782 /* Add any new overlay call stubs needed by the section. */
3785 && (sec_data = spu_elf_section_data (sec)) != NULL
3786 && (sinfo = sec_data->u.i.stack_info) != NULL)
3789 struct call_info *call;
3791 for (k = 0; k < sinfo->num_fun; ++k)
3792 for (call = sinfo->fun[k].call_list; call; call = call->next)
3793 if (call->fun->sec->linker_mark)
3795 struct call_info *p;
3796 for (p = dummy_caller.call_list; p; p = p->next)
3797 if (p->fun == call->fun)
3800 stub_size += ovl_stub_size (htab->params);
3803 if (tmp + stub_size < lib_size)
3805 struct call_info **pp, *p;
3807 /* This section fits. Mark it as non-overlay. */
3808 lib_sections[2 * i]->linker_mark = 0;
3809 if (lib_sections[2 * i + 1])
3810 lib_sections[2 * i + 1]->linker_mark = 0;
3811 lib_size -= tmp + stub_size;
3812 /* Call stubs to the section we just added are no longer
3814 pp = &dummy_caller.call_list;
3815 while ((p = *pp) != NULL)
3816 if (!p->fun->sec->linker_mark)
3818 lib_size += ovl_stub_size (htab->params);
3824 /* Add new call stubs to dummy_caller. */
3825 if ((sec_data = spu_elf_section_data (sec)) != NULL
3826 && (sinfo = sec_data->u.i.stack_info) != NULL)
3829 struct call_info *call;
3831 for (k = 0; k < sinfo->num_fun; ++k)
3832 for (call = sinfo->fun[k].call_list;
3835 if (call->fun->sec->linker_mark)
3837 struct call_info *callee;
3838 callee = bfd_malloc (sizeof (*callee));
3840 return (unsigned int) -1;
3842 if (!insert_callee (&dummy_caller, callee))
3848 while (dummy_caller.call_list != NULL)
3850 struct call_info *call = dummy_caller.call_list;
3851 dummy_caller.call_list = call->next;
3854 for (i = 0; i < 2 * lib_count; i++)
3855 if (lib_sections[i])
3856 lib_sections[i]->gc_mark = 1;
3857 free (lib_sections);
3861 /* Build an array of overlay sections. The deepest node's section is
3862 added first, then its parent node's section, then everything called
3863 from the parent section. The idea being to group sections to
3864 minimise calls between different overlays. */
3867 collect_overlays (struct function_info *fun,
3868 struct bfd_link_info *info,
3871 struct call_info *call;
3872 bfd_boolean added_fun;
3873 asection ***ovly_sections = param;
3879 for (call = fun->call_list; call != NULL; call = call->next)
3880 if (!call->is_pasted && !call->broken_cycle)
3882 if (!collect_overlays (call->fun, info, ovly_sections))
3888 if (fun->sec->linker_mark && fun->sec->gc_mark)
3890 fun->sec->gc_mark = 0;
3891 *(*ovly_sections)++ = fun->sec;
3892 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3894 fun->rodata->gc_mark = 0;
3895 *(*ovly_sections)++ = fun->rodata;
3898 *(*ovly_sections)++ = NULL;
3901 /* Pasted sections must stay with the first section. We don't
3902 put pasted sections in the array, just the first section.
3903 Mark subsequent sections as already considered. */
3904 if (fun->sec->segment_mark)
3906 struct function_info *call_fun = fun;
3909 for (call = call_fun->call_list; call != NULL; call = call->next)
3910 if (call->is_pasted)
3912 call_fun = call->fun;
3913 call_fun->sec->gc_mark = 0;
3914 if (call_fun->rodata)
3915 call_fun->rodata->gc_mark = 0;
3921 while (call_fun->sec->segment_mark);
3925 for (call = fun->call_list; call != NULL; call = call->next)
3926 if (!call->broken_cycle
3927 && !collect_overlays (call->fun, info, ovly_sections))
3932 struct _spu_elf_section_data *sec_data;
3933 struct spu_elf_stack_info *sinfo;
3935 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3936 && (sinfo = sec_data->u.i.stack_info) != NULL)
3939 for (i = 0; i < sinfo->num_fun; ++i)
3940 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3948 struct _sum_stack_param {
3950 size_t overall_stack;
3951 bfd_boolean emit_stack_syms;
3954 /* Descend the call graph for FUN, accumulating total stack required. */
3957 sum_stack (struct function_info *fun,
3958 struct bfd_link_info *info,
3961 struct call_info *call;
3962 struct function_info *max;
3963 size_t stack, cum_stack;
3965 bfd_boolean has_call;
3966 struct _sum_stack_param *sum_stack_param = param;
3967 struct spu_link_hash_table *htab;
3969 cum_stack = fun->stack;
3970 sum_stack_param->cum_stack = cum_stack;
3976 for (call = fun->call_list; call; call = call->next)
3978 if (call->broken_cycle)
3980 if (!call->is_pasted)
3982 if (!sum_stack (call->fun, info, sum_stack_param))
3984 stack = sum_stack_param->cum_stack;
3985 /* Include caller stack for normal calls, don't do so for
3986 tail calls. fun->stack here is local stack usage for
3988 if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3989 stack += fun->stack;
3990 if (cum_stack < stack)
3997 sum_stack_param->cum_stack = cum_stack;
3999 /* Now fun->stack holds cumulative stack. */
4000 fun->stack = cum_stack;
4004 && sum_stack_param->overall_stack < cum_stack)
4005 sum_stack_param->overall_stack = cum_stack;
4007 htab = spu_hash_table (info);
4008 if (htab->params->auto_overlay)
4011 f1 = func_name (fun);
4012 if (htab->params->stack_analysis)
4015 info->callbacks->info (" %s: 0x%v\n", f1, (bfd_vma) cum_stack);
4016 info->callbacks->minfo ("%s: 0x%v 0x%v\n",
4017 f1, (bfd_vma) stack, (bfd_vma) cum_stack);
4021 info->callbacks->minfo (_(" calls:\n"));
4022 for (call = fun->call_list; call; call = call->next)
4023 if (!call->is_pasted && !call->broken_cycle)
4025 const char *f2 = func_name (call->fun);
4026 const char *ann1 = call->fun == max ? "*" : " ";
4027 const char *ann2 = call->is_tail ? "t" : " ";
4029 info->callbacks->minfo (" %s%s %s\n", ann1, ann2, f2);
4034 if (sum_stack_param->emit_stack_syms)
4036 char *name = bfd_malloc (18 + strlen (f1));
4037 struct elf_link_hash_entry *h;
4042 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
4043 sprintf (name, "__stack_%s", f1);
4045 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
4047 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
4050 && (h->root.type == bfd_link_hash_new
4051 || h->root.type == bfd_link_hash_undefined
4052 || h->root.type == bfd_link_hash_undefweak))
4054 h->root.type = bfd_link_hash_defined;
4055 h->root.u.def.section = bfd_abs_section_ptr;
4056 h->root.u.def.value = cum_stack;
4061 h->ref_regular_nonweak = 1;
4062 h->forced_local = 1;
4070 /* SEC is part of a pasted function. Return the call_info for the
4071 next section of this function. */
4073 static struct call_info *
4074 find_pasted_call (asection *sec)
4076 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4077 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4078 struct call_info *call;
4081 for (k = 0; k < sinfo->num_fun; ++k)
4082 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4083 if (call->is_pasted)
4089 /* qsort predicate to sort bfds by file name. */
4092 sort_bfds (const void *a, const void *b)
4094 bfd *const *abfd1 = a;
4095 bfd *const *abfd2 = b;
4097 return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
4101 print_one_overlay_section (FILE *script,
4104 unsigned int ovlynum,
4105 unsigned int *ovly_map,
4106 asection **ovly_sections,
4107 struct bfd_link_info *info)
4111 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4113 asection *sec = ovly_sections[2 * j];
4115 if (fprintf (script, " %s%c%s (%s)\n",
4116 (sec->owner->my_archive != NULL
4117 ? sec->owner->my_archive->filename : ""),
4118 info->path_separator,
4119 sec->owner->filename,
4122 if (sec->segment_mark)
4124 struct call_info *call = find_pasted_call (sec);
4125 while (call != NULL)
4127 struct function_info *call_fun = call->fun;
4128 sec = call_fun->sec;
4129 if (fprintf (script, " %s%c%s (%s)\n",
4130 (sec->owner->my_archive != NULL
4131 ? sec->owner->my_archive->filename : ""),
4132 info->path_separator,
4133 sec->owner->filename,
4136 for (call = call_fun->call_list; call; call = call->next)
4137 if (call->is_pasted)
4143 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4145 asection *sec = ovly_sections[2 * j + 1];
4147 && fprintf (script, " %s%c%s (%s)\n",
4148 (sec->owner->my_archive != NULL
4149 ? sec->owner->my_archive->filename : ""),
4150 info->path_separator,
4151 sec->owner->filename,
4155 sec = ovly_sections[2 * j];
4156 if (sec->segment_mark)
4158 struct call_info *call = find_pasted_call (sec);
4159 while (call != NULL)
4161 struct function_info *call_fun = call->fun;
4162 sec = call_fun->rodata;
4164 && fprintf (script, " %s%c%s (%s)\n",
4165 (sec->owner->my_archive != NULL
4166 ? sec->owner->my_archive->filename : ""),
4167 info->path_separator,
4168 sec->owner->filename,
4171 for (call = call_fun->call_list; call; call = call->next)
4172 if (call->is_pasted)
4181 /* Handle --auto-overlay. */
4184 spu_elf_auto_overlay (struct bfd_link_info *info)
4188 struct elf_segment_map *m;
4189 unsigned int fixed_size, lo, hi;
4190 unsigned int reserved;
4191 struct spu_link_hash_table *htab;
4192 unsigned int base, i, count, bfd_count;
4193 unsigned int region, ovlynum;
4194 asection **ovly_sections, **ovly_p;
4195 unsigned int *ovly_map;
4197 unsigned int total_overlay_size, overlay_size;
4198 const char *ovly_mgr_entry;
4199 struct elf_link_hash_entry *h;
4200 struct _mos_param mos_param;
4201 struct _uos_param uos_param;
4202 struct function_info dummy_caller;
4204 /* Find the extents of our loadable image. */
4205 lo = (unsigned int) -1;
4207 for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
4208 if (m->p_type == PT_LOAD)
4209 for (i = 0; i < m->count; i++)
4210 if (m->sections[i]->size != 0)
4212 if (m->sections[i]->vma < lo)
4213 lo = m->sections[i]->vma;
4214 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4215 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4217 fixed_size = hi + 1 - lo;
4219 if (!discover_functions (info))
4222 if (!build_call_tree (info))
4225 htab = spu_hash_table (info);
4226 reserved = htab->params->auto_overlay_reserved;
4229 struct _sum_stack_param sum_stack_param;
4231 sum_stack_param.emit_stack_syms = 0;
4232 sum_stack_param.overall_stack = 0;
4233 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4235 reserved = (sum_stack_param.overall_stack
4236 + htab->params->extra_stack_space);
4239 /* No need for overlays if everything already fits. */
4240 if (fixed_size + reserved <= htab->local_store
4241 && htab->params->ovly_flavour != ovly_soft_icache)
4243 htab->params->auto_overlay = 0;
4247 uos_param.exclude_input_section = 0;
4248 uos_param.exclude_output_section
4249 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4251 ovly_mgr_entry = "__ovly_load";
4252 if (htab->params->ovly_flavour == ovly_soft_icache)
4253 ovly_mgr_entry = "__icache_br_handler";
4254 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4255 FALSE, FALSE, FALSE);
4257 && (h->root.type == bfd_link_hash_defined
4258 || h->root.type == bfd_link_hash_defweak)
4261 /* We have a user supplied overlay manager. */
4262 uos_param.exclude_input_section = h->root.u.def.section;
4266 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4267 builtin version to .text, and will adjust .text size. */
4268 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4271 /* Mark overlay sections, and find max overlay section size. */
4272 mos_param.max_overlay_size = 0;
4273 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4276 /* We can't put the overlay manager or interrupt routines in
4278 uos_param.clearing = 0;
4279 if ((uos_param.exclude_input_section
4280 || uos_param.exclude_output_section)
4281 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4285 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4287 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4288 if (bfd_arr == NULL)
4291 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4294 total_overlay_size = 0;
4295 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4297 extern const bfd_target spu_elf32_vec;
4299 unsigned int old_count;
4301 if (ibfd->xvec != &spu_elf32_vec)
4305 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4306 if (sec->linker_mark)
4308 if ((sec->flags & SEC_CODE) != 0)
4310 fixed_size -= sec->size;
4311 total_overlay_size += sec->size;
4313 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4314 && sec->output_section->owner == info->output_bfd
4315 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4316 fixed_size -= sec->size;
4317 if (count != old_count)
4318 bfd_arr[bfd_count++] = ibfd;
4321 /* Since the overlay link script selects sections by file name and
4322 section name, ensure that file names are unique. */
4325 bfd_boolean ok = TRUE;
4327 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4328 for (i = 1; i < bfd_count; ++i)
4329 if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4331 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4333 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4334 /* xgettext:c-format */
4335 info->callbacks->einfo (_("%s duplicated in %s\n"),
4336 bfd_arr[i]->filename,
4337 bfd_arr[i]->my_archive->filename);
4339 info->callbacks->einfo (_("%s duplicated\n"),
4340 bfd_arr[i]->filename);
4346 info->callbacks->einfo (_("sorry, no support for duplicate "
4347 "object files in auto-overlay script\n"));
4348 bfd_set_error (bfd_error_bad_value);
4354 fixed_size += reserved;
4355 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4356 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4358 if (htab->params->ovly_flavour == ovly_soft_icache)
4360 /* Stubs in the non-icache area are bigger. */
4361 fixed_size += htab->non_ovly_stub * 16;
4362 /* Space for icache manager tables.
4363 a) Tag array, one quadword per cache line.
4364 - word 0: ia address of present line, init to zero. */
4365 fixed_size += 16 << htab->num_lines_log2;
4366 /* b) Rewrite "to" list, one quadword per cache line. */
4367 fixed_size += 16 << htab->num_lines_log2;
4368 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4369 to a power-of-two number of full quadwords) per cache line. */
4370 fixed_size += 16 << (htab->fromelem_size_log2
4371 + htab->num_lines_log2);
4372 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4377 /* Guess number of overlays. Assuming overlay buffer is on
4378 average only half full should be conservative. */
4379 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4380 / (htab->local_store - fixed_size));
4381 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4382 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4386 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4387 /* xgettext:c-format */
4388 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4389 "size of 0x%v exceeds local store\n"),
4390 (bfd_vma) fixed_size,
4391 (bfd_vma) mos_param.max_overlay_size);
4393 /* Now see if we should put some functions in the non-overlay area. */
4394 else if (fixed_size < htab->params->auto_overlay_fixed)
4396 unsigned int max_fixed, lib_size;
4398 max_fixed = htab->local_store - mos_param.max_overlay_size;
4399 if (max_fixed > htab->params->auto_overlay_fixed)
4400 max_fixed = htab->params->auto_overlay_fixed;
4401 lib_size = max_fixed - fixed_size;
4402 lib_size = auto_ovl_lib_functions (info, lib_size);
4403 if (lib_size == (unsigned int) -1)
4405 fixed_size = max_fixed - lib_size;
4408 /* Build an array of sections, suitably sorted to place into
4410 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4411 if (ovly_sections == NULL)
4413 ovly_p = ovly_sections;
4414 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4416 count = (size_t) (ovly_p - ovly_sections) / 2;
4417 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4418 if (ovly_map == NULL)
4421 memset (&dummy_caller, 0, sizeof (dummy_caller));
4422 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4423 if (htab->params->line_size != 0)
4424 overlay_size = htab->params->line_size;
4427 while (base < count)
4429 unsigned int size = 0, rosize = 0, roalign = 0;
4431 for (i = base; i < count; i++)
4433 asection *sec, *rosec;
4434 unsigned int tmp, rotmp;
4435 unsigned int num_stubs;
4436 struct call_info *call, *pasty;
4437 struct _spu_elf_section_data *sec_data;
4438 struct spu_elf_stack_info *sinfo;
4441 /* See whether we can add this section to the current
4442 overlay without overflowing our overlay buffer. */
4443 sec = ovly_sections[2 * i];
4444 tmp = align_power (size, sec->alignment_power) + sec->size;
4446 rosec = ovly_sections[2 * i + 1];
4449 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4450 if (roalign < rosec->alignment_power)
4451 roalign = rosec->alignment_power;
4453 if (align_power (tmp, roalign) + rotmp > overlay_size)
4455 if (sec->segment_mark)
4457 /* Pasted sections must stay together, so add their
4459 pasty = find_pasted_call (sec);
4460 while (pasty != NULL)
4462 struct function_info *call_fun = pasty->fun;
4463 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4464 + call_fun->sec->size);
4465 if (call_fun->rodata)
4467 rotmp = (align_power (rotmp,
4468 call_fun->rodata->alignment_power)
4469 + call_fun->rodata->size);
4470 if (roalign < rosec->alignment_power)
4471 roalign = rosec->alignment_power;
4473 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4474 if (pasty->is_pasted)
4478 if (align_power (tmp, roalign) + rotmp > overlay_size)
4481 /* If we add this section, we might need new overlay call
4482 stubs. Add any overlay section calls to dummy_call. */
4484 sec_data = spu_elf_section_data (sec);
4485 sinfo = sec_data->u.i.stack_info;
4486 for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4487 for (call = sinfo->fun[k].call_list; call; call = call->next)
4488 if (call->is_pasted)
4490 BFD_ASSERT (pasty == NULL);
4493 else if (call->fun->sec->linker_mark)
4495 if (!copy_callee (&dummy_caller, call))
4498 while (pasty != NULL)
4500 struct function_info *call_fun = pasty->fun;
4502 for (call = call_fun->call_list; call; call = call->next)
4503 if (call->is_pasted)
4505 BFD_ASSERT (pasty == NULL);
4508 else if (!copy_callee (&dummy_caller, call))
4512 /* Calculate call stub size. */
4514 for (call = dummy_caller.call_list; call; call = call->next)
4516 unsigned int stub_delta = 1;
4518 if (htab->params->ovly_flavour == ovly_soft_icache)
4519 stub_delta = call->count;
4520 num_stubs += stub_delta;
4522 /* If the call is within this overlay, we won't need a
4524 for (k = base; k < i + 1; k++)
4525 if (call->fun->sec == ovly_sections[2 * k])
4527 num_stubs -= stub_delta;
4531 if (htab->params->ovly_flavour == ovly_soft_icache
4532 && num_stubs > htab->params->max_branch)
4534 if (align_power (tmp, roalign) + rotmp
4535 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4543 /* xgettext:c-format */
4544 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4545 ovly_sections[2 * i]->owner,
4546 ovly_sections[2 * i],
4547 ovly_sections[2 * i + 1] ? " + rodata" : "");
4548 bfd_set_error (bfd_error_bad_value);
4552 while (dummy_caller.call_list != NULL)
4554 struct call_info *call = dummy_caller.call_list;
4555 dummy_caller.call_list = call->next;
4561 ovly_map[base++] = ovlynum;
4564 script = htab->params->spu_elf_open_overlay_script ();
4566 if (htab->params->ovly_flavour == ovly_soft_icache)
4568 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4571 if (fprintf (script,
4572 " . = ALIGN (%u);\n"
4573 " .ovl.init : { *(.ovl.init) }\n"
4574 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4575 htab->params->line_size) <= 0)
4580 while (base < count)
4582 unsigned int indx = ovlynum - 1;
4583 unsigned int vma, lma;
4585 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4586 lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4588 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4589 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4590 ovlynum, vma, lma) <= 0)
4593 base = print_one_overlay_section (script, base, count, ovlynum,
4594 ovly_map, ovly_sections, info);
4595 if (base == (unsigned) -1)
4598 if (fprintf (script, " }\n") <= 0)
4604 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4605 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4608 if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4613 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4616 if (fprintf (script,
4617 " . = ALIGN (16);\n"
4618 " .ovl.init : { *(.ovl.init) }\n"
4619 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4622 for (region = 1; region <= htab->params->num_lines; region++)
4626 while (base < count && ovly_map[base] < ovlynum)
4634 /* We need to set lma since we are overlaying .ovl.init. */
4635 if (fprintf (script,
4636 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4641 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4645 while (base < count)
4647 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4650 base = print_one_overlay_section (script, base, count, ovlynum,
4651 ovly_map, ovly_sections, info);
4652 if (base == (unsigned) -1)
4655 if (fprintf (script, " }\n") <= 0)
4658 ovlynum += htab->params->num_lines;
4659 while (base < count && ovly_map[base] < ovlynum)
4663 if (fprintf (script, " }\n") <= 0)
4667 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4672 free (ovly_sections);
4674 if (fclose (script) != 0)
4677 if (htab->params->auto_overlay & AUTO_RELINK)
4678 (*htab->params->spu_elf_relink) ();
4683 bfd_set_error (bfd_error_system_call);
4685 info->callbacks->einfo (_("%F%P: auto overlay error: %E\n"));
4689 /* Provide an estimate of total stack required. */
4692 spu_elf_stack_analysis (struct bfd_link_info *info)
4694 struct spu_link_hash_table *htab;
4695 struct _sum_stack_param sum_stack_param;
4697 if (!discover_functions (info))
4700 if (!build_call_tree (info))
4703 htab = spu_hash_table (info);
4704 if (htab->params->stack_analysis)
4706 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4707 info->callbacks->minfo (_("\nStack size for functions. "
4708 "Annotations: '*' max stack, 't' tail call\n"));
4711 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4712 sum_stack_param.overall_stack = 0;
4713 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4716 if (htab->params->stack_analysis)
4717 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4718 (bfd_vma) sum_stack_param.overall_stack);
4722 /* Perform a final link. */
4725 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4727 struct spu_link_hash_table *htab = spu_hash_table (info);
4729 if (htab->params->auto_overlay)
4730 spu_elf_auto_overlay (info);
4732 if ((htab->params->stack_analysis
4733 || (htab->params->ovly_flavour == ovly_soft_icache
4734 && htab->params->lrlive_analysis))
4735 && !spu_elf_stack_analysis (info))
4736 info->callbacks->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));
4738 if (!spu_elf_build_stubs (info))
4739 info->callbacks->einfo (_("%F%P: can not build overlay stubs: %E\n"));
4741 return bfd_elf_final_link (output_bfd, info);
4744 /* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
4745 and !info->emitrelocations. Returns a count of special relocs
4746 that need to be emitted. */
4749 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4751 Elf_Internal_Rela *relocs;
4752 unsigned int count = 0;
4754 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4758 Elf_Internal_Rela *rel;
4759 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4761 for (rel = relocs; rel < relend; rel++)
4763 int r_type = ELF32_R_TYPE (rel->r_info);
4764 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4768 if (elf_section_data (sec)->relocs != relocs)
4775 /* Functions for adding fixup records to .fixup */
4777 #define FIXUP_RECORD_SIZE 4
4779 #define FIXUP_PUT(output_bfd,htab,index,addr) \
4780 bfd_put_32 (output_bfd, addr, \
4781 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4782 #define FIXUP_GET(output_bfd,htab,index) \
4783 bfd_get_32 (output_bfd, \
4784 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4786 /* Store OFFSET in .fixup. This assumes it will be called with an
4787 increasing OFFSET. When this OFFSET fits with the last base offset,
4788 it just sets a bit, otherwise it adds a new fixup record. */
4790 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4793 struct spu_link_hash_table *htab = spu_hash_table (info);
4794 asection *sfixup = htab->sfixup;
4795 bfd_vma qaddr = offset & ~(bfd_vma) 15;
4796 bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4797 if (sfixup->reloc_count == 0)
4799 FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4800 sfixup->reloc_count++;
4804 bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4805 if (qaddr != (base & ~(bfd_vma) 15))
4807 if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4808 _bfd_error_handler (_("fatal error while creating .fixup"));
4809 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4810 sfixup->reloc_count++;
4813 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4817 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4820 spu_elf_relocate_section (bfd *output_bfd,
4821 struct bfd_link_info *info,
4823 asection *input_section,
4825 Elf_Internal_Rela *relocs,
4826 Elf_Internal_Sym *local_syms,
4827 asection **local_sections)
4829 Elf_Internal_Shdr *symtab_hdr;
4830 struct elf_link_hash_entry **sym_hashes;
4831 Elf_Internal_Rela *rel, *relend;
4832 struct spu_link_hash_table *htab;
4835 bfd_boolean emit_these_relocs = FALSE;
4836 bfd_boolean is_ea_sym;
4838 unsigned int iovl = 0;
4840 htab = spu_hash_table (info);
4841 stubs = (htab->stub_sec != NULL
4842 && maybe_needs_stubs (input_section));
4843 iovl = overlay_index (input_section);
4844 ea = bfd_get_section_by_name (output_bfd, "._ea");
4845 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4846 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4849 relend = relocs + input_section->reloc_count;
4850 for (; rel < relend; rel++)
4853 reloc_howto_type *howto;
4854 unsigned int r_symndx;
4855 Elf_Internal_Sym *sym;
4857 struct elf_link_hash_entry *h;
4858 const char *sym_name;
4861 bfd_reloc_status_type r;
4862 bfd_boolean unresolved_reloc;
4863 enum _stub_type stub_type;
4865 r_symndx = ELF32_R_SYM (rel->r_info);
4866 r_type = ELF32_R_TYPE (rel->r_info);
4867 howto = elf_howto_table + r_type;
4868 unresolved_reloc = FALSE;
4872 if (r_symndx < symtab_hdr->sh_info)
4874 sym = local_syms + r_symndx;
4875 sec = local_sections[r_symndx];
4876 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4877 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4881 if (sym_hashes == NULL)
4884 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4886 if (info->wrap_hash != NULL
4887 && (input_section->flags & SEC_DEBUGGING) != 0)
4888 h = ((struct elf_link_hash_entry *)
4889 unwrap_hash_lookup (info, input_bfd, &h->root));
4891 while (h->root.type == bfd_link_hash_indirect
4892 || h->root.type == bfd_link_hash_warning)
4893 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4896 if (h->root.type == bfd_link_hash_defined
4897 || h->root.type == bfd_link_hash_defweak)
4899 sec = h->root.u.def.section;
4901 || sec->output_section == NULL)
4902 /* Set a flag that will be cleared later if we find a
4903 relocation value for this symbol. output_section
4904 is typically NULL for symbols satisfied by a shared
4906 unresolved_reloc = TRUE;
4908 relocation = (h->root.u.def.value
4909 + sec->output_section->vma
4910 + sec->output_offset);
4912 else if (h->root.type == bfd_link_hash_undefweak)
4914 else if (info->unresolved_syms_in_objects == RM_IGNORE
4915 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4917 else if (!bfd_link_relocatable (info)
4918 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4921 err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4922 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4923 (*info->callbacks->undefined_symbol) (info,
4924 h->root.root.string,
4927 rel->r_offset, err);
4929 sym_name = h->root.root.string;
4932 if (sec != NULL && discarded_section (sec))
4933 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4934 rel, 1, relend, howto, 0, contents);
4936 if (bfd_link_relocatable (info))
4939 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4940 if (r_type == R_SPU_ADD_PIC
4942 && !(h->def_regular || ELF_COMMON_DEF_P (h)))
4944 bfd_byte *loc = contents + rel->r_offset;
4950 is_ea_sym = (ea != NULL
4952 && sec->output_section == ea);
4954 /* If this symbol is in an overlay area, we may need to relocate
4955 to the overlay stub. */
4956 addend = rel->r_addend;
4959 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4960 contents, info)) != no_stub)
4962 unsigned int ovl = 0;
4963 struct got_entry *g, **head;
4965 if (stub_type != nonovl_stub)
4969 head = &h->got.glist;
4971 head = elf_local_got_ents (input_bfd) + r_symndx;
4973 for (g = *head; g != NULL; g = g->next)
4974 if (htab->params->ovly_flavour == ovly_soft_icache
4976 && g->br_addr == (rel->r_offset
4977 + input_section->output_offset
4978 + input_section->output_section->vma))
4979 : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4984 relocation = g->stub_addr;
4989 /* For soft icache, encode the overlay index into addresses. */
4990 if (htab->params->ovly_flavour == ovly_soft_icache
4991 && (r_type == R_SPU_ADDR16_HI
4992 || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
4995 unsigned int ovl = overlay_index (sec);
4998 unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
4999 relocation += set_id << 18;
5004 if (htab->params->emit_fixups && !bfd_link_relocatable (info)
5005 && (input_section->flags & SEC_ALLOC) != 0
5006 && r_type == R_SPU_ADDR32)
5009 offset = rel->r_offset + input_section->output_section->vma
5010 + input_section->output_offset;
5011 spu_elf_emit_fixup (output_bfd, info, offset);
5014 if (unresolved_reloc)
5016 else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5020 /* ._ea is a special section that isn't allocated in SPU
5021 memory, but rather occupies space in PPU memory as
5022 part of an embedded ELF image. If this reloc is
5023 against a symbol defined in ._ea, then transform the
5024 reloc into an equivalent one without a symbol
5025 relative to the start of the ELF image. */
5026 rel->r_addend += (relocation
5028 + elf_section_data (ea)->this_hdr.sh_offset);
5029 rel->r_info = ELF32_R_INFO (0, r_type);
5031 emit_these_relocs = TRUE;
5035 unresolved_reloc = TRUE;
5037 if (unresolved_reloc
5038 && _bfd_elf_section_offset (output_bfd, info, input_section,
5039 rel->r_offset) != (bfd_vma) -1)
5042 /* xgettext:c-format */
5043 (_("%B(%s+%#Lx): unresolvable %s relocation against symbol `%s'"),
5045 bfd_get_section_name (input_bfd, input_section),
5052 r = _bfd_final_link_relocate (howto,
5056 rel->r_offset, relocation, addend);
5058 if (r != bfd_reloc_ok)
5060 const char *msg = (const char *) 0;
5064 case bfd_reloc_overflow:
5065 (*info->callbacks->reloc_overflow)
5066 (info, (h ? &h->root : NULL), sym_name, howto->name,
5067 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
5070 case bfd_reloc_undefined:
5071 (*info->callbacks->undefined_symbol)
5072 (info, sym_name, input_bfd, input_section, rel->r_offset, TRUE);
5075 case bfd_reloc_outofrange:
5076 msg = _("internal error: out of range error");
5079 case bfd_reloc_notsupported:
5080 msg = _("internal error: unsupported relocation error");
5083 case bfd_reloc_dangerous:
5084 msg = _("internal error: dangerous error");
5088 msg = _("internal error: unknown error");
5093 (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
5094 input_section, rel->r_offset);
5101 && emit_these_relocs
5102 && !info->emitrelocations)
5104 Elf_Internal_Rela *wrel;
5105 Elf_Internal_Shdr *rel_hdr;
5107 wrel = rel = relocs;
5108 relend = relocs + input_section->reloc_count;
5109 for (; rel < relend; rel++)
5113 r_type = ELF32_R_TYPE (rel->r_info);
5114 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5117 input_section->reloc_count = wrel - relocs;
5118 /* Backflips for _bfd_elf_link_output_relocs. */
5119 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5120 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
5128 spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
5129 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5134 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5137 spu_elf_output_symbol_hook (struct bfd_link_info *info,
5138 const char *sym_name ATTRIBUTE_UNUSED,
5139 Elf_Internal_Sym *sym,
5140 asection *sym_sec ATTRIBUTE_UNUSED,
5141 struct elf_link_hash_entry *h)
5143 struct spu_link_hash_table *htab = spu_hash_table (info);
5145 if (!bfd_link_relocatable (info)
5146 && htab->stub_sec != NULL
5148 && (h->root.type == bfd_link_hash_defined
5149 || h->root.type == bfd_link_hash_defweak)
5151 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5153 struct got_entry *g;
5155 for (g = h->got.glist; g != NULL; g = g->next)
5156 if (htab->params->ovly_flavour == ovly_soft_icache
5157 ? g->br_addr == g->stub_addr
5158 : g->addend == 0 && g->ovl == 0)
5160 sym->st_shndx = (_bfd_elf_section_from_bfd_section
5161 (htab->stub_sec[0]->output_section->owner,
5162 htab->stub_sec[0]->output_section));
5163 sym->st_value = g->stub_addr;
5171 static int spu_plugin = 0;
5174 spu_elf_plugin (int val)
5179 /* Set ELF header e_type for plugins. */
5182 spu_elf_post_process_headers (bfd *abfd, struct bfd_link_info *info)
5186 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5188 i_ehdrp->e_type = ET_DYN;
5191 _bfd_elf_post_process_headers (abfd, info);
5194 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5195 segments for overlays. */
5198 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5205 struct spu_link_hash_table *htab = spu_hash_table (info);
5206 extra = htab->num_overlays;
5212 sec = bfd_get_section_by_name (abfd, ".toe");
5213 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5219 /* Remove .toe section from other PT_LOAD segments and put it in
5220 a segment of its own. Put overlays in separate segments too. */
5223 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
5226 struct elf_segment_map *m, *m_overlay;
5227 struct elf_segment_map **p, **p_overlay;
5233 toe = bfd_get_section_by_name (abfd, ".toe");
5234 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
5235 if (m->p_type == PT_LOAD && m->count > 1)
5236 for (i = 0; i < m->count; i++)
5237 if ((s = m->sections[i]) == toe
5238 || spu_elf_section_data (s)->u.o.ovl_index != 0)
5240 struct elf_segment_map *m2;
5243 if (i + 1 < m->count)
5245 amt = sizeof (struct elf_segment_map);
5246 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
5247 m2 = bfd_zalloc (abfd, amt);
5250 m2->count = m->count - (i + 1);
5251 memcpy (m2->sections, m->sections + i + 1,
5252 m2->count * sizeof (m->sections[0]));
5253 m2->p_type = PT_LOAD;
5261 amt = sizeof (struct elf_segment_map);
5262 m2 = bfd_zalloc (abfd, amt);
5265 m2->p_type = PT_LOAD;
5267 m2->sections[0] = s;
5275 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5276 PT_LOAD segments. This can cause the .ovl.init section to be
5277 overwritten with the contents of some overlay segment. To work
5278 around this issue, we ensure that all PF_OVERLAY segments are
5279 sorted first amongst the program headers; this ensures that even
5280 with a broken loader, the .ovl.init section (which is not marked
5281 as PF_OVERLAY) will be placed into SPU local store on startup. */
5283 /* Move all overlay segments onto a separate list. */
5284 p = &elf_seg_map (abfd);
5285 p_overlay = &m_overlay;
5288 if ((*p)->p_type == PT_LOAD && (*p)->count == 1
5289 && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
5294 p_overlay = &m->next;
5301 /* Re-insert overlay segments at the head of the segment map. */
5302 *p_overlay = elf_seg_map (abfd);
5303 elf_seg_map (abfd) = m_overlay;
5308 /* Tweak the section type of .note.spu_name. */
5311 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5312 Elf_Internal_Shdr *hdr,
5315 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5316 hdr->sh_type = SHT_NOTE;
5320 /* Tweak phdrs before writing them out. */
5323 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
5325 const struct elf_backend_data *bed;
5326 struct elf_obj_tdata *tdata;
5327 Elf_Internal_Phdr *phdr, *last;
5328 struct spu_link_hash_table *htab;
5335 bed = get_elf_backend_data (abfd);
5336 tdata = elf_tdata (abfd);
5338 count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
5339 htab = spu_hash_table (info);
5340 if (htab->num_overlays != 0)
5342 struct elf_segment_map *m;
5345 for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
5347 && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
5349 /* Mark this as an overlay header. */
5350 phdr[i].p_flags |= PF_OVERLAY;
5352 if (htab->ovtab != NULL && htab->ovtab->size != 0
5353 && htab->params->ovly_flavour != ovly_soft_icache)
5355 bfd_byte *p = htab->ovtab->contents;
5356 unsigned int off = o * 16 + 8;
5358 /* Write file_off into _ovly_table. */
5359 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
5362 /* Soft-icache has its file offset put in .ovl.init. */
5363 if (htab->init != NULL && htab->init->size != 0)
5365 bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;
5367 bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
5371 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5372 of 16. This should always be possible when using the standard
5373 linker scripts, but don't create overlapping segments if
5374 someone is playing games with linker scripts. */
5376 for (i = count; i-- != 0; )
5377 if (phdr[i].p_type == PT_LOAD)
5381 adjust = -phdr[i].p_filesz & 15;
5384 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
5387 adjust = -phdr[i].p_memsz & 15;
5390 && phdr[i].p_filesz != 0
5391 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
5392 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
5395 if (phdr[i].p_filesz != 0)
5399 if (i == (unsigned int) -1)
5400 for (i = count; i-- != 0; )
5401 if (phdr[i].p_type == PT_LOAD)
5405 adjust = -phdr[i].p_filesz & 15;
5406 phdr[i].p_filesz += adjust;
5408 adjust = -phdr[i].p_memsz & 15;
5409 phdr[i].p_memsz += adjust;
5416 spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
5418 struct spu_link_hash_table *htab = spu_hash_table (info);
5419 if (htab->params->emit_fixups)
5421 asection *sfixup = htab->sfixup;
5422 int fixup_count = 0;
5426 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
5430 if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
5433 /* Walk over each section attached to the input bfd. */
5434 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
5436 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5439 /* If there aren't any relocs, then there's nothing more
5441 if ((isec->flags & SEC_ALLOC) == 0
5442 || (isec->flags & SEC_RELOC) == 0
5443 || isec->reloc_count == 0)
5446 /* Get the relocs. */
5448 _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
5450 if (internal_relocs == NULL)
5453 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5454 relocations. They are stored in a single word by
5455 saving the upper 28 bits of the address and setting the
5456 lower 4 bits to a bit mask of the words that have the
5457 relocation. BASE_END keeps track of the next quadword. */
5458 irela = internal_relocs;
5459 irelaend = irela + isec->reloc_count;
5461 for (; irela < irelaend; irela++)
5462 if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
5463 && irela->r_offset >= base_end)
5465 base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
5471 /* We always have a NULL fixup as a sentinel */
5472 size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
5473 if (!bfd_set_section_size (output_bfd, sfixup, size))
5475 sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
5476 if (sfixup->contents == NULL)
5482 #define TARGET_BIG_SYM spu_elf32_vec
5483 #define TARGET_BIG_NAME "elf32-spu"
5484 #define ELF_ARCH bfd_arch_spu
5485 #define ELF_TARGET_ID SPU_ELF_DATA
5486 #define ELF_MACHINE_CODE EM_SPU
5487 /* This matches the alignment need for DMA. */
5488 #define ELF_MAXPAGESIZE 0x80
5489 #define elf_backend_rela_normal 1
5490 #define elf_backend_can_gc_sections 1
5492 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5493 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5494 #define elf_info_to_howto spu_elf_info_to_howto
5495 #define elf_backend_count_relocs spu_elf_count_relocs
5496 #define elf_backend_relocate_section spu_elf_relocate_section
5497 #define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
5498 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5499 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5500 #define elf_backend_object_p spu_elf_object_p
5501 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5502 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5504 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5505 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5506 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5507 #define elf_backend_post_process_headers spu_elf_post_process_headers
5508 #define elf_backend_fake_sections spu_elf_fake_sections
5509 #define elf_backend_special_sections spu_elf_special_sections
5510 #define bfd_elf32_bfd_final_link spu_elf_final_link
5512 #include "elf32-target.h"