/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"
/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
					   void *, asection *, bfd *,
					   char **);
/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADD_PIC",
	 FALSE, 0, 0x00000000, FALSE),
};
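/* A worked example of reading the table above: for SPU_ADDR16 the
   dst_mask 0x007fff80 selects bits 7..22 of the instruction word,
   i.e. a 16-bit field, and bitpos 7 says the relocated value is
   shifted left by 7 before being masked into the instruction.  */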
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    case BFD_RELOC_SPU_ADD_PIC:
      return R_SPU_ADD_PIC;
    }
}
static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
		       arelent *cache_ptr,
		       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}
static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}
static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
	&& strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
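/* A worked example of the bit scatter above: a word-aligned pc-relative
   offset of 0x600 bytes is shifted to val = 0x180; its two high bits are
   placed at both candidate positions, (0x180 << 7) = 0xc000 for the REL9I
   field and (0x180 << 16) = 0x1800000 for the REL9 field, and the howto's
   dst_mask then keeps only the field that applies to this reloc.  */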
static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
	return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}
/* Set up overlay info for executables.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	  {
	    unsigned int j;

	    ++num_ovl;
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      ++num_buf;
	    last_phdr = phdr;
	    for (j = 1; j < elf_numsections (abfd); j++)
	      {
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
		  {
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
		  }
	      }
	  }
    }
  return TRUE;
}
/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}
/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section.  */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};

/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  bfd_vma stub_addr;
};

#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};

struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
static struct function_info *find_function (asection *, bfd_vma,
					    struct bfd_link_info *);
/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
				      _bfd_elf_link_hash_newfunc,
				      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  memset (&htab->ovtab, 0,
	  sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}
void
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
{
  bfd_vma max_branch_log2;

  struct spu_link_hash_table *htab = spu_hash_table (info);
  htab->params = params;
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);

  /* For the software i-cache, we provide a "from" list whose size
     is a power-of-two number of quadwords, big enough to hold one
     byte per outgoing branch.  Compute this number here.  */
  max_branch_log2 = bfd_log2 (htab->params->max_branch);
  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
}
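/* For example, assuming a max_branch parameter of 256, bfd_log2 gives 8,
   so fromelem_size_log2 is 4: the "from" list is 16 quadwords per cache
   line, i.e. 256 bytes, one byte per outgoing branch.  */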
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      asection *s;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
	      | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
	return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}
/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}
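/* Note that sections with equal vmas fall back to a comparison of their
   original section index, so the sort order is deterministic regardless
   of whether the qsort implementation is stable.  */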
/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */

int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
	&& s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];
	      vma_start = s0->vma;
	      ovl_end = (s0->vma
			 + ((bfd_vma) 1
			    << (htab->num_lines_log2 + htab->line_size_log2)));
	      --i;
	      break;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma >= ovl_end)
	    break;

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	    {
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      set_id = (num_buf == prev_buf)? set_id + 1 : 0;
	      prev_buf = num_buf;

	      if ((s->vma - vma_start) & (htab->params->line_size - 1))
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "does not start on a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}
	      else if (s->size > htab->params->line_size)
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "is larger than a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}

	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= (set_id << htab->num_lines_log2) + num_buf;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
	    }
	}

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      info->callbacks->einfo (_("%X%P: overlay section %A "
					"is not in cache area.\n"),
				      alloc_sec[i - 1]);
	      bfd_set_error (bfd_error_bad_value);
	      return 0;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];

	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		{
		  ++num_buf;
		  if (strncmp (s0->name, ".ovl.init", 9) != 0)
		    {
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    }
		  else
		    ovl_end = s->vma + s->size;
		}
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		{
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  if (s0->vma != s->vma)
		    {
		      info->callbacks->einfo (_("%X%P: overlay sections %A "
						"and %A do not start at the "
						"same address.\n"),
					      s0, s);
		      bfd_set_error (bfd_error_bad_value);
		      return 0;
		    }
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
		}
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h == NULL)
	return 0;

      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_undefined;
	  h->ref_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->non_elf = 0;
	}
      htab->ovly_entry[i] = h;
    }

  return 2;
}
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
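/* The values above are instruction templates: the SPU major opcode in the
   high bits with all operand fields zero.  The stub-building code below
   ORs register numbers and immediates into them, e.g.
   ILA + ((dest << 7) & 0x01ffff80) + 79 assembles "ila $79,dest".  */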
/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}
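/* E.g. br has first byte 0x32 and brsl 0x33; both satisfy
   (insn[0] & 0xec) == 0x20, as do bra/brasl (0x30/0x31) and the
   conditional forms brz/brnz/brhz/brhnz (0x20..0x23).  */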
/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}
/* Return true for branch hint instructions.
   hbra  0001000..
   hbrr  0001001..  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}
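/* The 0xfc mask accepts first bytes 0x10..0x13, i.e. both hint forms
   listed above (hbra encodes as 0x10/0x11, hbrr as 0x12/0x13).  */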
/* True if INPUT_SECTION might need overlay stubs.  */

static bfd_boolean
maybe_needs_stubs (asection *input_section)
{
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)
    return FALSE;

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == bfd_abs_section_ptr)
    return FALSE;

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)
    return FALSE;

  return TRUE;
}

enum _stub_type
{
  no_stub,
  call_ovl_stub,
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,
  stub_error
};
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);
	    }
	}
    }

  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
	{
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)
	    return FALSE;
	}
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return TRUE;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)
	  break;

      if (g == NULL)
	{
	  /* Need a new non-overlay area stub.  Zap other stubs.  */
	  for (g = *head; g != NULL; g = gnext)
	    {
	      gnext = g->next;
	      if (g->addend == addend)
		{
		  htab->stub_count[g->ovl] -= 1;
		  free (g);
		}
	    }
	}
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}
/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.
   Soft-icache stubs are four or eight words.  */

static unsigned int
ovl_stub_size (struct spu_elf_params *params)
{
  return 16 << params->ovly_flavour >> params->compact_stub;
}

static unsigned int
ovl_stub_size_log2 (struct spu_elf_params *params)
{
  return 4 + params->ovly_flavour - params->compact_stub;
}
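/* Assuming the enum ordering ovly_normal == 0 and ovly_soft_icache == 1
   from elf32-spu.h, the expressions above give 16-byte normal stubs
   (8 bytes when compact) and 32-byte soft-icache stubs (16 bytes when
   compact), matching the stub layouts described below.  */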
/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load

   Software icache stubs are:

   .word target_index
   .word target_ia;
   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler
   .quad xor_pattern  */
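/* For instance, a branch into overlay 3 at local store address 0x4000
   is encoded in a compact stub's data word as (3 << 18) | 0x4000,
   mirroring the (dest & 0x3ffff) | (dest_ovl << 18) computation in
   build_stub below.  */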
static bfd_boolean
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	abort ();

      if (g->ovl == 0 && ovl != 0)
	return TRUE;

      if (g->stub_addr != (bfd_vma) -1)
	return TRUE;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
    {
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
    {
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    {
	      /* Between lr save and stack adjust.  */
	      lrlive = 3;
	      /* This should never happen since prologues won't
		 be split here.  */
	      BFD_ASSERT (0);
	    }
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      if (ovl == 0)
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len);
      if (name == NULL)
	return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
	  || htab->params->non_overlay_stubs))
    {
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
    }

  return TRUE;
}

static bfd_boolean
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
	  || htab->params->non_overlay_stubs))
    {
      return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
			 h->root.u.def.value, sym_sec);
    }

  return TRUE;
}
/* Size or build stubs.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (local_syms != NULL
		      && (symtab_hdr->contents
			  != (unsigned char *) local_syms))
		    free (local_syms);
		  return FALSE;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return TRUE;
}
/* Allocate space for overlay call and return stubs.
   Return 0 on error, 1 if no overlays, 2 otherwise.  */

int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  if (!process_stubs (info, FALSE))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
	return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	       | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (ibfd, stub,
					 ovl_stub_size_log2 (htab->params)))
	return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
	/* Extra space for linked list entries.  */
	stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
	{
	  asection *osec = htab->ovl_sec[i];
	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
	  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
	  htab->stub_sec[ovl] = stub;
	  if (stub == NULL
	      || !bfd_set_section_alignment (ibfd, stub,
					     ovl_stub_size_log2 (htab->params)))
	    return 0;
	  stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
	}
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
			  << htab->num_lines_log2;
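      /* As a worked example with assumed parameters: 64 cache lines
	 (num_lines_log2 == 6) and fromelem_size_log2 == 1 give
	 (16 + 16 + 32) << 6 == 4096 bytes for the three tables.  */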
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
	  || !bfd_set_section_alignment (ibfd, htab->init, 4))
	return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
	 .	struct {
	 .	  u32 vma;
	 .	  u32 size;
	 .	  u32 file_off;
	 .	  u32 buf;
	 .	} _ovly_table[];
	 .
	 .	struct {
	 .	  u32 mapped;
	 .	} _ovly_buf_table[];
	 .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
/* Called from ld to place overlay manager data sections.  This is done
   after the overlay manager itself is loaded, mainly so that the
   linker's htab->init section is placed after any other .ovl.init
   sections.  */

void
spu_elf_place_overlay_data (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int i;

  if (htab->stub_sec != NULL)
    {
      (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");

      for (i = 0; i < htab->num_overlays; ++i)
	{
	  asection *osec = htab->ovl_sec[i];
	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
	  (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
	}
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");

  if (htab->ovtab != NULL)
    {
      const char *ovout = ".data";
      if (htab->params->ovly_flavour == ovly_soft_icache)
	ovout = ".bss";
      (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
    }

  if (htab->toe != NULL)
    (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
}
/* Functions to handle embedded spu_ovl.o object.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}

static file_ptr
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
	       void *stream,
	       void *buf,
	       file_ptr nbytes,
	       file_ptr offset)
{
  struct _ovl_stream *os;
  size_t count, max;

  os = (struct _ovl_stream *) stream;
  max = (const char *) os->end - (const char *) os->start;

  if ((ufile_ptr) offset >= max)
    return 0;

  count = nbytes;
  if (count > max - offset)
    count = max - offset;

  memcpy (buf, (const char *) os->start + offset, count);
  return count;
}
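/* This pread callback simply serves bytes out of the in-memory spu_ovl.o
   image, clamping reads that would run past os->end, so the embedded
   overlay manager can be opened by BFD like an ordinary file.  */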
bfd_boolean
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
			      "elf32-spu",
			      ovl_mgr_open,
			      (void *) stream,
			      ovl_mgr_pread,
			      NULL,
			      NULL);
  return *ovl_bfd != NULL;
}
static unsigned int
overlay_index (asection *sec)
{
  if (sec == NULL
      || sec->output_section == bfd_abs_section_ptr)
    return 0;
  return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
}
/* Define an STT_OBJECT symbol.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
    }
  else if (h->root.u.def.section->owner != NULL)
    {
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
			     h->root.u.def.section->owner,
			     h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }
  else
    {
      (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
			     h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}
/* Fill in all stubs and the overlay tables.  */

bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  if (htab->num_overlays != 0)
    {
      for (i = 0; i < 2; i++)
	{
	  h = htab->ovly_entry[i];
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak)
	      && h->def_regular)
	    {
	      s = h->root.u.def.section->output_section;
	      if (spu_elf_section_data (s)->u.o.ovl_index)
		{
		  (*_bfd_error_handler) (_("%s in overlay section"),
					 h->root.root.string);
		  bfd_set_error (bfd_error_bad_value);
		  return FALSE;
		}
	    }
	}
    }

  if (htab->stub_sec != NULL)
    {
      for (i = 0; i <= htab->num_overlays; i++)
	if (htab->stub_sec[i]->size != 0)
	  {
	    htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
						      htab->stub_sec[i]->size);
	    if (htab->stub_sec[i]->contents == NULL)
	      return FALSE;
	    htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
	    htab->stub_sec[i]->size = 0;
	  }

      /* Fill in all the stubs.  */
      process_stubs (info, TRUE);
      if (!htab->stub_err)
	elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

      if (htab->stub_err)
	{
	  (*_bfd_error_handler) (_("overlay stub relocation overflow"));
	  bfd_set_error (bfd_error_bad_value);
	  return FALSE;
	}

      for (i = 0; i <= htab->num_overlays; i++)
	{
	  if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
	    {
	      (*_bfd_error_handler) (_("stubs don't match calculated size"));
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }
	  htab->stub_sec[i]->rawsize = 0;
	}
    }

  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return TRUE;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      bfd_vma off;

      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
				   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
	{
	  htab->init->contents = bfd_zalloc (htab->init->owner,
					     htab->init->size);
	  if (htab->init->contents == NULL)
	    return FALSE;

	  h = define_ovtab_symbol (htab, "__icache_fileoff");
	  if (h == NULL)
	    return FALSE;
	  h->root.u.def.value = 0;
	  h->root.u.def.section = htab->init;
	  h->size = 8;
	}
    }
  else
    {
      /* Write out _ovly_table.  */
      /* set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
	{
	  unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

	  if (ovl_index != 0)
	    {
	      unsigned long off = ovl_index * 16;
	      unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

	      bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
	      bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
			  p + off + 4);
	      /* file_off written later in spu_elf_modify_program_headers.  */
	      bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
	    }
	}

      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}
/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */

asection *
spu_elf_check_vma (struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  unsigned int i;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *abfd = info->output_bfd;
  bfd_vma hi = htab->params->local_store_hi;
  bfd_vma lo = htab->params->local_store_lo;

  htab->local_store = hi + 1 - lo;
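  /* With the typical SPU values local_store_lo == 0 and
     local_store_hi == 0x3ffff (an assumption; the actual defaults come
     from the linker parameters), this gives the full 256k local store.  */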
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0
	    && (m->sections[i]->vma < lo
		|| m->sections[i]->vma > hi
		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
	  return m->sections[i];

  return NULL;
}
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.
   If a store of lr is found save the instruction offset to *LR_STORE.
   If a stack adjusting instruction is found, save that offset to
   *SP_ADJUST.  */

static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[rb] - reg[ra];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;
	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
		     | ((imm & 0x4000) ? 0x00ff0000 : 0)
		     | ((imm & 0x2000) ? 0x0000ff00 : 0)
		     | ((imm & 0x1000) ? 0x000000ff : 0));
	  continue;
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	  continue;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  return 0;
}
/* qsort predicate to sort symbols by section and value.  */

static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;

static int
sort_syms (const void *a, const void *b)
{
  Elf_Internal_Sym *const *s1 = a;
  Elf_Internal_Sym *const *s2 = b;
  asection *sec1, *sec2;
  bfd_signed_vma delta;

  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];

  if (sec1 != sec2)
    return sec1->index - sec2->index;

  delta = (*s1)->st_value - (*s2)->st_value;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  delta = (*s2)->st_size - (*s1)->st_size;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return *s1 < *s2 ? -1 : 1;
}
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
   entries for section SEC.  */

static struct spu_elf_stack_info *
alloc_stack_info (asection *sec, int max_fun)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  bfd_size_type amt;

  amt = sizeof (struct spu_elf_stack_info);
  amt += (max_fun - 1) * sizeof (struct function_info);
  sec_data->u.i.stack_info = bfd_zmalloc (amt);
  if (sec_data->u.i.stack_info != NULL)
    sec_data->u.i.stack_info->max_fun = max_fun;
  return sec_data->u.i.stack_info;
}
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.  */

static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
/* Return the name of FUN.  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
/* Read the instruction at OFF in SEC.  Return true iff the instruction
   is a nop, lnop, or stop 0 (all zero insn).  */

static bfd_boolean
is_nop (asection *sec, bfd_vma off)
{
  unsigned char insn[4];

  if (off + 4 > sec->size
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
    return FALSE;
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
    return TRUE;
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
    return TRUE;
  return FALSE;
}
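/* E.g. nop (0x40200000) and lnop (0x00200000) both pass the first test:
   0x40 & 0xbf == 0 and 0x00 & 0xbf == 0, with second byte
   0x20 & 0xe0 == 0x20.  An all-zero word is "stop 0" padding.  */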
/* Extend the range of FUN to cover nop padding up to LIMIT.
   Return TRUE iff some instruction other than a NOP was found.  */

static bfd_boolean
insns_at_end (struct function_info *fun, bfd_vma limit)
{
  bfd_vma off = (fun->hi + 3) & -4;

  while (off < limit && is_nop (fun->sec, off))
    off += 4;
  if (off < limit)
    {
      fun->hi = off;
      return TRUE;
    }
  fun->hi = limit;
  return FALSE;
}
2515 /* Check and fix overlapping function ranges. Return TRUE iff there
2516 are gaps in the current info we have about functions in SEC. */
2519 check_function_ranges (asection *sec, struct bfd_link_info *info)
2521 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2522 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2524 bfd_boolean gaps = FALSE;
2529 for (i = 1; i < sinfo->num_fun; i++)
2530 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2532 /* Fix overlapping symbols. */
2533 const char *f1 = func_name (&sinfo->fun[i - 1]);
2534 const char *f2 = func_name (&sinfo->fun[i]);
2536 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2537 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2539 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2542 if (sinfo->num_fun == 0)
2546 if (sinfo->fun[0].lo != 0)
2548 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2550 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2552 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2553 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2555 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2561 /* Search current function info for a function that contains address
2562 OFFSET in section SEC. */
2564 static struct function_info *
2565 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2567 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2568 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2572 hi = sinfo->num_fun;
2575 mid = (lo + hi) / 2;
2576 if (offset < sinfo->fun[mid].lo)
2578 else if (offset >= sinfo->fun[mid].hi)
2581 return &sinfo->fun[mid];
2583 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2585 bfd_set_error (bfd_error_bad_value);
2589 /* Add CALLEE to CALLER call list if not already present. Return TRUE
if CALLEE was new.  If this function returns FALSE, CALLEE should
be deallocated.  */
2594 insert_callee (struct function_info *caller, struct call_info *callee)
2596 struct call_info **pp, *p;
2598 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2599 if (p->fun == callee->fun)
2601 /* Tail calls use less stack than normal calls. Retain entry
2602 for normal call over one for tail call. */
2603 p->is_tail &= callee->is_tail;
2606 p->fun->start = NULL;
2607 p->fun->is_func = TRUE;
2609 p->count += callee->count;
2610 /* Reorder list so most recent call is first. */
2612 p->next = caller->call_list;
2613 caller->call_list = p;
2616 callee->next = caller->call_list;
2617 caller->call_list = callee;
2621 /* Copy CALL and insert the copy into CALLER. */
2624 copy_callee (struct function_info *caller, const struct call_info *call)
2626 struct call_info *callee;
2627 callee = bfd_malloc (sizeof (*callee));
2631 if (!insert_callee (caller, callee))
2636 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2637 overlay stub sections. */
2640 interesting_section (asection *s)
2642 return (s->output_section != bfd_abs_section_ptr
2643 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2644 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2648 /* Rummage through the relocs for SEC, looking for function calls.
2649 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2650 mark destination symbols on calls as being functions. Also
2651 look at branches, which may be tail calls or go to hot/cold
2652 section part of same function. */
2655 mark_functions_via_relocs (asection *sec,
2656 struct bfd_link_info *info,
2659 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2660 Elf_Internal_Shdr *symtab_hdr;
2662 unsigned int priority = 0;
2663 static bfd_boolean warned;
2665 if (!interesting_section (sec)
2666 || sec->reloc_count == 0)
2669 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2671 if (internal_relocs == NULL)
2674 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2675 psyms = &symtab_hdr->contents;
2676 irela = internal_relocs;
2677 irelaend = irela + sec->reloc_count;
2678 for (; irela < irelaend; irela++)
2680 enum elf_spu_reloc_type r_type;
2681 unsigned int r_indx;
2683 Elf_Internal_Sym *sym;
2684 struct elf_link_hash_entry *h;
2686 bfd_boolean nonbranch, is_call;
2687 struct function_info *caller;
2688 struct call_info *callee;
2690 r_type = ELF32_R_TYPE (irela->r_info);
2691 nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
2693 r_indx = ELF32_R_SYM (irela->r_info);
2694 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2698 || sym_sec->output_section == bfd_abs_section_ptr)
2704 unsigned char insn[4];
2706 if (!bfd_get_section_contents (sec->owner, sec, insn,
2707 irela->r_offset, 4))
2709 if (is_branch (insn))
2711 is_call = (insn[0] & 0xfd) == 0x31;
2712 priority = insn[1] & 0x0f;
2714 priority |= insn[2];
2716 priority |= insn[3];
2718 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2719 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2722 info->callbacks->einfo
2723 (_("%B(%A+0x%v): call to non-code section"
2724 " %B(%A), analysis incomplete\n"),
2725 sec->owner, sec, irela->r_offset,
2726 sym_sec->owner, sym_sec);
2741 /* For --auto-overlay, count possible stubs we need for
2742 function pointer references. */
2743 unsigned int sym_type;
2747 sym_type = ELF_ST_TYPE (sym->st_info);
2748 if (sym_type == STT_FUNC)
2750 if (call_tree && spu_hash_table (info)->params->auto_overlay)
2751 spu_hash_table (info)->non_ovly_stub += 1;
2752 /* If the symbol type is STT_FUNC then this must be a
2753 function pointer initialisation. */
2756 /* Ignore data references. */
2757 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2758 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2760 /* Otherwise we probably have a jump table reloc for
a switch statement or some other reference to a code symbol.  */
2766 val = h->root.u.def.value;
2768 val = sym->st_value;
2769 val += irela->r_addend;
2773 struct function_info *fun;
2775 if (irela->r_addend != 0)
2777 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2780 fake->st_value = val;
2782 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2786 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2788 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2791 if (irela->r_addend != 0
2792 && fun->u.sym != sym)
2797 caller = find_function (sec, irela->r_offset, info);
2800 callee = bfd_malloc (sizeof *callee);
2804 callee->fun = find_function (sym_sec, val, info);
2805 if (callee->fun == NULL)
2807 callee->is_tail = !is_call;
2808 callee->is_pasted = FALSE;
2809 callee->broken_cycle = FALSE;
2810 callee->priority = priority;
2811 callee->count = nonbranch? 0 : 1;
2812 if (callee->fun->last_caller != sec)
2814 callee->fun->last_caller = sec;
2815 callee->fun->call_count += 1;
2817 if (!insert_callee (caller, callee))
2820 && !callee->fun->is_func
2821 && callee->fun->stack == 0)
2823 /* This is either a tail call or a branch from one part of
2824 the function to another, ie. hot/cold section. If the
2825 destination has been called by some other function then
2826 it is a separate function. We also assume that functions
2827 are not split across input files. */
2828 if (sec->owner != sym_sec->owner)
2830 callee->fun->start = NULL;
2831 callee->fun->is_func = TRUE;
2833 else if (callee->fun->start == NULL)
2835 struct function_info *caller_start = caller;
2836 while (caller_start->start)
2837 caller_start = caller_start->start;
2839 if (caller_start != callee->fun)
2840 callee->fun->start = caller_start;
2844 struct function_info *callee_start;
2845 struct function_info *caller_start;
2846 callee_start = callee->fun;
2847 while (callee_start->start)
2848 callee_start = callee_start->start;
2849 caller_start = caller;
2850 while (caller_start->start)
2851 caller_start = caller_start->start;
2852 if (caller_start != callee_start)
2854 callee->fun->start = NULL;
2855 callee->fun->is_func = TRUE;
2864 /* Handle something like .init or .fini, which has a piece of a function.
2865 These sections are pasted together to form a single function. */
2868 pasted_function (asection *sec)
2870 struct bfd_link_order *l;
2871 struct _spu_elf_section_data *sec_data;
2872 struct spu_elf_stack_info *sinfo;
2873 Elf_Internal_Sym *fake;
2874 struct function_info *fun, *fun_start;
2876 fake = bfd_zmalloc (sizeof (*fake));
2880 fake->st_size = sec->size;
2882 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2883 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2887 /* Find a function immediately preceding this section. */
2889 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2891 if (l->u.indirect.section == sec)
2893 if (fun_start != NULL)
2895 struct call_info *callee = bfd_malloc (sizeof *callee);
2899 fun->start = fun_start;
2901 callee->is_tail = TRUE;
2902 callee->is_pasted = TRUE;
2903 callee->broken_cycle = FALSE;
2904 callee->priority = 0;
2906 if (!insert_callee (fun_start, callee))
2912 if (l->type == bfd_indirect_link_order
2913 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2914 && (sinfo = sec_data->u.i.stack_info) != NULL
2915 && sinfo->num_fun != 0)
2916 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2919 /* Don't return an error if we did not find a function preceding this
2920 section. The section may have incorrect flags. */
2924 /* Map address ranges in code sections to functions. */
2927 discover_functions (struct bfd_link_info *info)
2931 Elf_Internal_Sym ***psym_arr;
2932 asection ***sec_arr;
2933 bfd_boolean gaps = FALSE;
2936 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2939 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2940 if (psym_arr == NULL)
2942 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2943 if (sec_arr == NULL)
2946 for (ibfd = info->input_bfds, bfd_idx = 0;
2948 ibfd = ibfd->link_next, bfd_idx++)
2950 extern const bfd_target bfd_elf32_spu_vec;
2951 Elf_Internal_Shdr *symtab_hdr;
2954 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2955 asection **psecs, **p;
2957 if (ibfd->xvec != &bfd_elf32_spu_vec)
2960 /* Read all the symbols. */
2961 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2962 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2966 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2967 if (interesting_section (sec))
2975 if (symtab_hdr->contents != NULL)
2977 /* Don't use cached symbols since the generic ELF linker
2978 code only reads local symbols, and we need globals too. */
2979 free (symtab_hdr->contents);
2980 symtab_hdr->contents = NULL;
2982 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2984 symtab_hdr->contents = (void *) syms;
2988 /* Select defined function symbols that are going to be output. */
2989 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2992 psym_arr[bfd_idx] = psyms;
2993 psecs = bfd_malloc (symcount * sizeof (*psecs));
2996 sec_arr[bfd_idx] = psecs;
2997 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2998 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2999 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3003 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3004 if (s != NULL && interesting_section (s))
3007 symcount = psy - psyms;
3010 /* Sort them by section and offset within section. */
3011 sort_syms_syms = syms;
3012 sort_syms_psecs = psecs;
3013 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3015 /* Now inspect the function symbols. */
3016 for (psy = psyms; psy < psyms + symcount; )
3018 asection *s = psecs[*psy - syms];
3019 Elf_Internal_Sym **psy2;
3021 for (psy2 = psy; ++psy2 < psyms + symcount; )
3022 if (psecs[*psy2 - syms] != s)
3025 if (!alloc_stack_info (s, psy2 - psy))
3030 /* First install info about properly typed and sized functions.
3031 In an ideal world this will cover all code sections, except
3032 when partitioning functions into hot and cold sections,
3033 and the horrible pasted together .init and .fini functions. */
3034 for (psy = psyms; psy < psyms + symcount; ++psy)
3037 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3039 asection *s = psecs[sy - syms];
3040 if (!maybe_insert_function (s, sy, FALSE, TRUE))
3045 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3046 if (interesting_section (sec))
3047 gaps |= check_function_ranges (sec, info);
/* See if we can discover more function symbols by looking at relocations.  */
3054 for (ibfd = info->input_bfds, bfd_idx = 0;
3056 ibfd = ibfd->link_next, bfd_idx++)
3060 if (psym_arr[bfd_idx] == NULL)
3063 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3064 if (!mark_functions_via_relocs (sec, info, FALSE))
3068 for (ibfd = info->input_bfds, bfd_idx = 0;
3070 ibfd = ibfd->link_next, bfd_idx++)
3072 Elf_Internal_Shdr *symtab_hdr;
3074 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3077 if ((psyms = psym_arr[bfd_idx]) == NULL)
3080 psecs = sec_arr[bfd_idx];
3082 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3083 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3086 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3087 if (interesting_section (sec))
3088 gaps |= check_function_ranges (sec, info);
3092 /* Finally, install all globals. */
3093 for (psy = psyms; (sy = *psy) != NULL; ++psy)
3097 s = psecs[sy - syms];
3099 /* Global syms might be improperly typed functions. */
3100 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3101 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3103 if (!maybe_insert_function (s, sy, FALSE, FALSE))
3109 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3111 extern const bfd_target bfd_elf32_spu_vec;
3114 if (ibfd->xvec != &bfd_elf32_spu_vec)
3117 /* Some of the symbols we've installed as marking the
3118 beginning of functions may have a size of zero. Extend
3119 the range of such functions to the beginning of the
3120 next symbol of interest. */
3121 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3122 if (interesting_section (sec))
3124 struct _spu_elf_section_data *sec_data;
3125 struct spu_elf_stack_info *sinfo;
3127 sec_data = spu_elf_section_data (sec);
3128 sinfo = sec_data->u.i.stack_info;
3129 if (sinfo != NULL && sinfo->num_fun != 0)
3132 bfd_vma hi = sec->size;
3134 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3136 sinfo->fun[fun_idx].hi = hi;
3137 hi = sinfo->fun[fun_idx].lo;
3140 sinfo->fun[0].lo = 0;
3142 /* No symbols in this section. Must be .init or .fini
3143 or something similar. */
3144 else if (!pasted_function (sec))
3150 for (ibfd = info->input_bfds, bfd_idx = 0;
3152 ibfd = ibfd->link_next, bfd_idx++)
3154 if (psym_arr[bfd_idx] == NULL)
3157 free (psym_arr[bfd_idx]);
3158 free (sec_arr[bfd_idx]);
3167 /* Iterate over all function_info we have collected, calling DOIT on
each node if ROOT_ONLY is false.  Only call DOIT on root nodes otherwise.  */
3172 for_each_node (bfd_boolean (*doit) (struct function_info *,
3173 struct bfd_link_info *,
3175 struct bfd_link_info *info,
3181 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3183 extern const bfd_target bfd_elf32_spu_vec;
3186 if (ibfd->xvec != &bfd_elf32_spu_vec)
3189 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3191 struct _spu_elf_section_data *sec_data;
3192 struct spu_elf_stack_info *sinfo;
3194 if ((sec_data = spu_elf_section_data (sec)) != NULL
3195 && (sinfo = sec_data->u.i.stack_info) != NULL)
3198 for (i = 0; i < sinfo->num_fun; ++i)
3199 if (!root_only || !sinfo->fun[i].non_root)
3200 if (!doit (&sinfo->fun[i], info, param))
3208 /* Transfer call info attached to struct function_info entries for
3209 all of a given function's sections to the first entry. */
3212 transfer_calls (struct function_info *fun,
3213 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3214 void *param ATTRIBUTE_UNUSED)
3216 struct function_info *start = fun->start;
3220 struct call_info *call, *call_next;
3222 while (start->start != NULL)
3223 start = start->start;
3224 for (call = fun->call_list; call != NULL; call = call_next)
3226 call_next = call->next;
3227 if (!insert_callee (start, call))
3230 fun->call_list = NULL;
3235 /* Mark nodes in the call graph that are called by some other node. */
3238 mark_non_root (struct function_info *fun,
3239 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3240 void *param ATTRIBUTE_UNUSED)
3242 struct call_info *call;
3247 for (call = fun->call_list; call; call = call->next)
3249 call->fun->non_root = TRUE;
3250 mark_non_root (call->fun, 0, 0);
3255 /* Remove cycles from the call graph. Set depth of nodes. */
3258 remove_cycles (struct function_info *fun,
3259 struct bfd_link_info *info,
3262 struct call_info **callp, *call;
3263 unsigned int depth = *(unsigned int *) param;
3264 unsigned int max_depth = depth;
3268 fun->marking = TRUE;
3270 callp = &fun->call_list;
3271 while ((call = *callp) != NULL)
3273 call->max_depth = depth + !call->is_pasted;
3274 if (!call->fun->visit2)
3276 if (!remove_cycles (call->fun, info, &call->max_depth))
3278 if (max_depth < call->max_depth)
3279 max_depth = call->max_depth;
3281 else if (call->fun->marking)
3283 struct spu_link_hash_table *htab = spu_hash_table (info);
3285 if (!htab->params->auto_overlay
3286 && htab->params->stack_analysis)
3288 const char *f1 = func_name (fun);
3289 const char *f2 = func_name (call->fun);
info->callbacks->info (_("Stack analysis will ignore the call "
"from %s to %s\n"),
f1, f2);
3296 call->broken_cycle = TRUE;
3298 callp = &call->next;
3300 fun->marking = FALSE;
3301 *(unsigned int *) param = max_depth;
3305 /* Check that we actually visited all nodes in remove_cycles. If we
3306 didn't, then there is some cycle in the call graph not attached to
3307 any root node. Arbitrarily choose a node in the cycle as a new
3308 root and break the cycle. */
3311 mark_detached_root (struct function_info *fun,
3312 struct bfd_link_info *info,
3317 fun->non_root = FALSE;
3318 *(unsigned int *) param = 0;
3319 return remove_cycles (fun, info, param);
3322 /* Populate call_list for each function. */
3325 build_call_tree (struct bfd_link_info *info)
3330 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3332 extern const bfd_target bfd_elf32_spu_vec;
3335 if (ibfd->xvec != &bfd_elf32_spu_vec)
3338 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3339 if (!mark_functions_via_relocs (sec, info, TRUE))
/* Transfer call info from hot/cold section part of function
to the main entry.  */
3345 if (!spu_hash_table (info)->params->auto_overlay
3346 && !for_each_node (transfer_calls, info, 0, FALSE))
3349 /* Find the call graph root(s). */
3350 if (!for_each_node (mark_non_root, info, 0, FALSE))
3353 /* Remove cycles from the call graph. We start from the root node(s)
3354 so that we break cycles in a reasonable place. */
3356 if (!for_each_node (remove_cycles, info, &depth, TRUE))
3359 return for_each_node (mark_detached_root, info, &depth, FALSE);
3362 /* qsort predicate to sort calls by priority, max_depth then count. */
3365 sort_calls (const void *a, const void *b)
3367 struct call_info *const *c1 = a;
3368 struct call_info *const *c2 = b;
3371 delta = (*c2)->priority - (*c1)->priority;
3375 delta = (*c2)->max_depth - (*c1)->max_depth;
3379 delta = (*c2)->count - (*c1)->count;
3383 return (char *) c1 - (char *) c2;
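/* Explanatory note: qsort is not a stable sort, so the address
difference above provides a deterministic tie-break, keeping the
order of otherwise-equal entries reproducible.  */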
struct _mos_param {
unsigned int max_overlay_size;
};
3390 /* Set linker_mark and gc_mark on any sections that we will put in
3391 overlays. These flags are used by the generic ELF linker, but we
3392 won't be continuing on to bfd_elf_final_link so it is OK to use
3393 them. linker_mark is clear before we get here. Set segment_mark
3394 on sections that are part of a pasted function (excluding the last
3397 Set up function rodata section if --overlay-rodata. We don't
currently include merged string constant rodata sections since
these are not tied to a particular function.

Sort the call graph so that the deepest nodes will be visited
first.  */
3404 mark_overlay_section (struct function_info *fun,
3405 struct bfd_link_info *info,
3408 struct call_info *call;
3410 struct _mos_param *mos_param = param;
3411 struct spu_link_hash_table *htab = spu_hash_table (info);
3417 if (!fun->sec->linker_mark
3418 && (htab->params->ovly_flavour != ovly_soft_icache
3419 || htab->params->non_ia_text
3420 || strncmp (fun->sec->name, ".text.ia.", 9) == 0
3421 || strcmp (fun->sec->name, ".init") == 0
3422 || strcmp (fun->sec->name, ".fini") == 0))
3426 fun->sec->linker_mark = 1;
3427 fun->sec->gc_mark = 1;
3428 fun->sec->segment_mark = 0;
3429 /* Ensure SEC_CODE is set on this text section (it ought to
3430 be!), and SEC_CODE is clear on rodata sections. We use
3431 this flag to differentiate the two overlay section types. */
3432 fun->sec->flags |= SEC_CODE;
3434 size = fun->sec->size;
3435 if (htab->params->auto_overlay & OVERLAY_RODATA)
/* Find the rodata section corresponding to this function's
text section.  */
3441 if (strcmp (fun->sec->name, ".text") == 0)
3443 name = bfd_malloc (sizeof (".rodata"));
3446 memcpy (name, ".rodata", sizeof (".rodata"));
3448 else if (strncmp (fun->sec->name, ".text.", 6) == 0)
3450 size_t len = strlen (fun->sec->name);
3451 name = bfd_malloc (len + 3);
3454 memcpy (name, ".rodata", sizeof (".rodata"));
3455 memcpy (name + 7, fun->sec->name + 5, len - 4);
3457 else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
3459 size_t len = strlen (fun->sec->name) + 1;
3460 name = bfd_malloc (len);
3463 memcpy (name, fun->sec->name, len);
3469 asection *rodata = NULL;
3470 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3471 if (group_sec == NULL)
3472 rodata = bfd_get_section_by_name (fun->sec->owner, name);
3474 while (group_sec != NULL && group_sec != fun->sec)
3476 if (strcmp (group_sec->name, name) == 0)
3481 group_sec = elf_section_data (group_sec)->next_in_group;
3483 fun->rodata = rodata;
3486 size += fun->rodata->size;
3487 if (htab->params->line_size != 0
3488 && size > htab->params->line_size)
3490 size -= fun->rodata->size;
3495 fun->rodata->linker_mark = 1;
3496 fun->rodata->gc_mark = 1;
3497 fun->rodata->flags &= ~SEC_CODE;
3503 if (mos_param->max_overlay_size < size)
3504 mos_param->max_overlay_size = size;
3507 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3512 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3516 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3517 calls[count++] = call;
3519 qsort (calls, count, sizeof (*calls), sort_calls);
3521 fun->call_list = NULL;
3525 calls[count]->next = fun->call_list;
3526 fun->call_list = calls[count];
3531 for (call = fun->call_list; call != NULL; call = call->next)
3533 if (call->is_pasted)
3535 /* There can only be one is_pasted call per function_info. */
3536 BFD_ASSERT (!fun->sec->segment_mark);
3537 fun->sec->segment_mark = 1;
3539 if (!call->broken_cycle
3540 && !mark_overlay_section (call->fun, info, param))
3544 /* Don't put entry code into an overlay. The overlay manager needs
3545 a stack! Also, don't mark .ovl.init as an overlay. */
3546 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3547 == info->output_bfd->start_address
3548 || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
3550 fun->sec->linker_mark = 0;
3551 if (fun->rodata != NULL)
3552 fun->rodata->linker_mark = 0;
3557 /* If non-zero then unmark functions called from those within sections
3558 that we need to unmark. Unfortunately this isn't reliable since the
3559 call graph cannot know the destination of function pointer calls. */
3560 #define RECURSE_UNMARK 0
struct _uos_param {
asection *exclude_input_section;
asection *exclude_output_section;
unsigned long clearing;
};
3568 /* Undo some of mark_overlay_section's work. */
3571 unmark_overlay_section (struct function_info *fun,
3572 struct bfd_link_info *info,
3575 struct call_info *call;
3576 struct _uos_param *uos_param = param;
3577 unsigned int excluded = 0;
3585 if (fun->sec == uos_param->exclude_input_section
3586 || fun->sec->output_section == uos_param->exclude_output_section)
3590 uos_param->clearing += excluded;
3592 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3594 fun->sec->linker_mark = 0;
3596 fun->rodata->linker_mark = 0;
3599 for (call = fun->call_list; call != NULL; call = call->next)
3600 if (!call->broken_cycle
3601 && !unmark_overlay_section (call->fun, info, param))
3605 uos_param->clearing -= excluded;
struct _cl_param {
unsigned int lib_size;
asection **lib_sections;
};
3614 /* Add sections we have marked as belonging to overlays to an array
for consideration as non-overlay sections.  The array consists of
3616 pairs of sections, (text,rodata), for functions in the call graph. */
3619 collect_lib_sections (struct function_info *fun,
3620 struct bfd_link_info *info,
3623 struct _cl_param *lib_param = param;
3624 struct call_info *call;
3631 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3634 size = fun->sec->size;
3636 size += fun->rodata->size;
3638 if (size <= lib_param->lib_size)
3640 *lib_param->lib_sections++ = fun->sec;
3641 fun->sec->gc_mark = 0;
3642 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3644 *lib_param->lib_sections++ = fun->rodata;
3645 fun->rodata->gc_mark = 0;
3648 *lib_param->lib_sections++ = NULL;
3651 for (call = fun->call_list; call != NULL; call = call->next)
3652 if (!call->broken_cycle)
3653 collect_lib_sections (call->fun, info, param);
3658 /* qsort predicate to sort sections by call count. */
3661 sort_lib (const void *a, const void *b)
3663 asection *const *s1 = a;
3664 asection *const *s2 = b;
3665 struct _spu_elf_section_data *sec_data;
3666 struct spu_elf_stack_info *sinfo;
3670 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3671 && (sinfo = sec_data->u.i.stack_info) != NULL)
3674 for (i = 0; i < sinfo->num_fun; ++i)
3675 delta -= sinfo->fun[i].call_count;
3678 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3679 && (sinfo = sec_data->u.i.stack_info) != NULL)
3682 for (i = 0; i < sinfo->num_fun; ++i)
3683 delta += sinfo->fun[i].call_count;
3692 /* Remove some sections from those marked to be in overlays. Choose
3693 those that are called from many places, likely library functions. */
3696 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3699 asection **lib_sections;
3700 unsigned int i, lib_count;
3701 struct _cl_param collect_lib_param;
3702 struct function_info dummy_caller;
3703 struct spu_link_hash_table *htab;
3705 memset (&dummy_caller, 0, sizeof (dummy_caller));
3707 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3709 extern const bfd_target bfd_elf32_spu_vec;
3712 if (ibfd->xvec != &bfd_elf32_spu_vec)
3715 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3716 if (sec->linker_mark
3717 && sec->size < lib_size
3718 && (sec->flags & SEC_CODE) != 0)
3721 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3722 if (lib_sections == NULL)
3723 return (unsigned int) -1;
3724 collect_lib_param.lib_size = lib_size;
3725 collect_lib_param.lib_sections = lib_sections;
3726 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3728 return (unsigned int) -1;
3729 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3731 /* Sort sections so that those with the most calls are first. */
3733 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3735 htab = spu_hash_table (info);
3736 for (i = 0; i < lib_count; i++)
3738 unsigned int tmp, stub_size;
3740 struct _spu_elf_section_data *sec_data;
3741 struct spu_elf_stack_info *sinfo;
3743 sec = lib_sections[2 * i];
3744 /* If this section is OK, its size must be less than lib_size. */
3746 /* If it has a rodata section, then add that too. */
3747 if (lib_sections[2 * i + 1])
3748 tmp += lib_sections[2 * i + 1]->size;
3749 /* Add any new overlay call stubs needed by the section. */
3752 && (sec_data = spu_elf_section_data (sec)) != NULL
3753 && (sinfo = sec_data->u.i.stack_info) != NULL)
3756 struct call_info *call;
3758 for (k = 0; k < sinfo->num_fun; ++k)
3759 for (call = sinfo->fun[k].call_list; call; call = call->next)
3760 if (call->fun->sec->linker_mark)
3762 struct call_info *p;
3763 for (p = dummy_caller.call_list; p; p = p->next)
3764 if (p->fun == call->fun)
3767 stub_size += ovl_stub_size (htab->params);
3770 if (tmp + stub_size < lib_size)
3772 struct call_info **pp, *p;
3774 /* This section fits. Mark it as non-overlay. */
3775 lib_sections[2 * i]->linker_mark = 0;
3776 if (lib_sections[2 * i + 1])
3777 lib_sections[2 * i + 1]->linker_mark = 0;
3778 lib_size -= tmp + stub_size;
/* Call stubs to the section we just added are no longer needed.  */
3781 pp = &dummy_caller.call_list;
3782 while ((p = *pp) != NULL)
3783 if (!p->fun->sec->linker_mark)
3785 lib_size += ovl_stub_size (htab->params);
3791 /* Add new call stubs to dummy_caller. */
3792 if ((sec_data = spu_elf_section_data (sec)) != NULL
3793 && (sinfo = sec_data->u.i.stack_info) != NULL)
3796 struct call_info *call;
3798 for (k = 0; k < sinfo->num_fun; ++k)
3799 for (call = sinfo->fun[k].call_list;
3802 if (call->fun->sec->linker_mark)
3804 struct call_info *callee;
3805 callee = bfd_malloc (sizeof (*callee));
3807 return (unsigned int) -1;
3809 if (!insert_callee (&dummy_caller, callee))
3815 while (dummy_caller.call_list != NULL)
3817 struct call_info *call = dummy_caller.call_list;
3818 dummy_caller.call_list = call->next;
3821 for (i = 0; i < 2 * lib_count; i++)
3822 if (lib_sections[i])
3823 lib_sections[i]->gc_mark = 1;
3824 free (lib_sections);
3828 /* Build an array of overlay sections. The deepest node's section is
3829 added first, then its parent node's section, then everything called
from the parent section.  The idea is to group sections so as to
3831 minimise calls between different overlays. */
3834 collect_overlays (struct function_info *fun,
3835 struct bfd_link_info *info,
3838 struct call_info *call;
3839 bfd_boolean added_fun;
3840 asection ***ovly_sections = param;
3846 for (call = fun->call_list; call != NULL; call = call->next)
3847 if (!call->is_pasted && !call->broken_cycle)
3849 if (!collect_overlays (call->fun, info, ovly_sections))
3855 if (fun->sec->linker_mark && fun->sec->gc_mark)
3857 fun->sec->gc_mark = 0;
3858 *(*ovly_sections)++ = fun->sec;
3859 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3861 fun->rodata->gc_mark = 0;
3862 *(*ovly_sections)++ = fun->rodata;
3865 *(*ovly_sections)++ = NULL;
3868 /* Pasted sections must stay with the first section. We don't
3869 put pasted sections in the array, just the first section.
3870 Mark subsequent sections as already considered. */
3871 if (fun->sec->segment_mark)
3873 struct function_info *call_fun = fun;
3876 for (call = call_fun->call_list; call != NULL; call = call->next)
3877 if (call->is_pasted)
3879 call_fun = call->fun;
3880 call_fun->sec->gc_mark = 0;
3881 if (call_fun->rodata)
3882 call_fun->rodata->gc_mark = 0;
3888 while (call_fun->sec->segment_mark);
3892 for (call = fun->call_list; call != NULL; call = call->next)
3893 if (!call->broken_cycle
3894 && !collect_overlays (call->fun, info, ovly_sections))
3899 struct _spu_elf_section_data *sec_data;
3900 struct spu_elf_stack_info *sinfo;
3902 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3903 && (sinfo = sec_data->u.i.stack_info) != NULL)
3906 for (i = 0; i < sinfo->num_fun; ++i)
3907 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3915 struct _sum_stack_param {
3917 size_t overall_stack;
3918 bfd_boolean emit_stack_syms;
3921 /* Descend the call graph for FUN, accumulating total stack required. */
3924 sum_stack (struct function_info *fun,
3925 struct bfd_link_info *info,
3928 struct call_info *call;
3929 struct function_info *max;
3930 size_t stack, cum_stack;
3932 bfd_boolean has_call;
3933 struct _sum_stack_param *sum_stack_param = param;
3934 struct spu_link_hash_table *htab;
3936 cum_stack = fun->stack;
3937 sum_stack_param->cum_stack = cum_stack;
3943 for (call = fun->call_list; call; call = call->next)
3945 if (call->broken_cycle)
3947 if (!call->is_pasted)
3949 if (!sum_stack (call->fun, info, sum_stack_param))
3951 stack = sum_stack_param->cum_stack;
3952 /* Include caller stack for normal calls, don't do so for
tail calls.  fun->stack here is local stack usage for this function.  */
3955 if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3956 stack += fun->stack;
3957 if (cum_stack < stack)
3964 sum_stack_param->cum_stack = cum_stack;
3966 /* Now fun->stack holds cumulative stack. */
3967 fun->stack = cum_stack;
3971 && sum_stack_param->overall_stack < cum_stack)
3972 sum_stack_param->overall_stack = cum_stack;
3974 htab = spu_hash_table (info);
3975 if (htab->params->auto_overlay)
3978 f1 = func_name (fun);
3979 if (htab->params->stack_analysis)
3982 info->callbacks->info (_(" %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
3983 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
3984 f1, (bfd_vma) stack, (bfd_vma) cum_stack);
3988 info->callbacks->minfo (_(" calls:\n"));
3989 for (call = fun->call_list; call; call = call->next)
3990 if (!call->is_pasted && !call->broken_cycle)
3992 const char *f2 = func_name (call->fun);
3993 const char *ann1 = call->fun == max ? "*" : " ";
3994 const char *ann2 = call->is_tail ? "t" : " ";
3996 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
4001 if (sum_stack_param->emit_stack_syms)
4003 char *name = bfd_malloc (18 + strlen (f1));
4004 struct elf_link_hash_entry *h;
4009 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
4010 sprintf (name, "__stack_%s", f1);
4012 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
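/* Illustrative examples of the names built above: a global function
"main" gets "__stack_main", while a local "foo" in the section with
id 5 gets "__stack_5_foo".  */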
4014 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
4017 && (h->root.type == bfd_link_hash_new
4018 || h->root.type == bfd_link_hash_undefined
4019 || h->root.type == bfd_link_hash_undefweak))
4021 h->root.type = bfd_link_hash_defined;
4022 h->root.u.def.section = bfd_abs_section_ptr;
4023 h->root.u.def.value = cum_stack;
4028 h->ref_regular_nonweak = 1;
4029 h->forced_local = 1;
4037 /* SEC is part of a pasted function. Return the call_info for the
4038 next section of this function. */
4040 static struct call_info *
4041 find_pasted_call (asection *sec)
4043 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4044 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4045 struct call_info *call;
4048 for (k = 0; k < sinfo->num_fun; ++k)
4049 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4050 if (call->is_pasted)
4056 /* qsort predicate to sort bfds by file name. */
4059 sort_bfds (const void *a, const void *b)
4061 bfd *const *abfd1 = a;
4062 bfd *const *abfd2 = b;
4064 return strcmp ((*abfd1)->filename, (*abfd2)->filename);
4068 print_one_overlay_section (FILE *script,
4071 unsigned int ovlynum,
4072 unsigned int *ovly_map,
4073 asection **ovly_sections,
4074 struct bfd_link_info *info)
4078 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4080 asection *sec = ovly_sections[2 * j];
4082 if (fprintf (script, " %s%c%s (%s)\n",
4083 (sec->owner->my_archive != NULL
4084 ? sec->owner->my_archive->filename : ""),
4085 info->path_separator,
4086 sec->owner->filename,
4089 if (sec->segment_mark)
4091 struct call_info *call = find_pasted_call (sec);
4092 while (call != NULL)
4094 struct function_info *call_fun = call->fun;
4095 sec = call_fun->sec;
4096 if (fprintf (script, " %s%c%s (%s)\n",
4097 (sec->owner->my_archive != NULL
4098 ? sec->owner->my_archive->filename : ""),
4099 info->path_separator,
4100 sec->owner->filename,
4103 for (call = call_fun->call_list; call; call = call->next)
4104 if (call->is_pasted)
4110 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4112 asection *sec = ovly_sections[2 * j + 1];
4114 && fprintf (script, " %s%c%s (%s)\n",
4115 (sec->owner->my_archive != NULL
4116 ? sec->owner->my_archive->filename : ""),
4117 info->path_separator,
4118 sec->owner->filename,
4122 sec = ovly_sections[2 * j];
4123 if (sec->segment_mark)
4125 struct call_info *call = find_pasted_call (sec);
4126 while (call != NULL)
4128 struct function_info *call_fun = call->fun;
4129 sec = call_fun->rodata;
4131 && fprintf (script, " %s%c%s (%s)\n",
4132 (sec->owner->my_archive != NULL
4133 ? sec->owner->my_archive->filename : ""),
4134 info->path_separator,
4135 sec->owner->filename,
4138 for (call = call_fun->call_list; call; call = call->next)
4139 if (call->is_pasted)
4148 /* Handle --auto-overlay. */
4151 spu_elf_auto_overlay (struct bfd_link_info *info)
4155 struct elf_segment_map *m;
4156 unsigned int fixed_size, lo, hi;
4157 unsigned int reserved;
4158 struct spu_link_hash_table *htab;
4159 unsigned int base, i, count, bfd_count;
4160 unsigned int region, ovlynum;
4161 asection **ovly_sections, **ovly_p;
4162 unsigned int *ovly_map;
4164 unsigned int total_overlay_size, overlay_size;
4165 const char *ovly_mgr_entry;
4166 struct elf_link_hash_entry *h;
4167 struct _mos_param mos_param;
4168 struct _uos_param uos_param;
4169 struct function_info dummy_caller;
4171 /* Find the extents of our loadable image. */
4172 lo = (unsigned int) -1;
4174 for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
4175 if (m->p_type == PT_LOAD)
4176 for (i = 0; i < m->count; i++)
4177 if (m->sections[i]->size != 0)
4179 if (m->sections[i]->vma < lo)
4180 lo = m->sections[i]->vma;
4181 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4182 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4184 fixed_size = hi + 1 - lo;
4186 if (!discover_functions (info))
4189 if (!build_call_tree (info))
4192 htab = spu_hash_table (info);
4193 reserved = htab->params->auto_overlay_reserved;
4196 struct _sum_stack_param sum_stack_param;
4198 sum_stack_param.emit_stack_syms = 0;
4199 sum_stack_param.overall_stack = 0;
4200 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4202 reserved = (sum_stack_param.overall_stack
4203 + htab->params->extra_stack_space);
4206 /* No need for overlays if everything already fits. */
4207 if (fixed_size + reserved <= htab->local_store
4208 && htab->params->ovly_flavour != ovly_soft_icache)
4210 htab->params->auto_overlay = 0;
4214 uos_param.exclude_input_section = 0;
4215 uos_param.exclude_output_section
4216 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4218 ovly_mgr_entry = "__ovly_load";
4219 if (htab->params->ovly_flavour == ovly_soft_icache)
4220 ovly_mgr_entry = "__icache_br_handler";
4221 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4222 FALSE, FALSE, FALSE);
4224 && (h->root.type == bfd_link_hash_defined
4225 || h->root.type == bfd_link_hash_defweak)
4228 /* We have a user supplied overlay manager. */
4229 uos_param.exclude_input_section = h->root.u.def.section;
4233 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4234 builtin version to .text, and will adjust .text size. */
4235 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4238 /* Mark overlay sections, and find max overlay section size. */
4239 mos_param.max_overlay_size = 0;
4240 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
/* We can't put the overlay manager or interrupt routines in overlays.  */
4245 uos_param.clearing = 0;
4246 if ((uos_param.exclude_input_section
4247 || uos_param.exclude_output_section)
4248 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4252 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4254 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4255 if (bfd_arr == NULL)
4258 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4261 total_overlay_size = 0;
4262 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4264 extern const bfd_target bfd_elf32_spu_vec;
4266 unsigned int old_count;
4268 if (ibfd->xvec != &bfd_elf32_spu_vec)
4272 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4273 if (sec->linker_mark)
4275 if ((sec->flags & SEC_CODE) != 0)
4277 fixed_size -= sec->size;
4278 total_overlay_size += sec->size;
4280 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4281 && sec->output_section->owner == info->output_bfd
4282 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4283 fixed_size -= sec->size;
4284 if (count != old_count)
4285 bfd_arr[bfd_count++] = ibfd;
4288 /* Since the overlay link script selects sections by file name and
4289 section name, ensure that file names are unique. */
4292 bfd_boolean ok = TRUE;
4294 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4295 for (i = 1; i < bfd_count; ++i)
4296 if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4298 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4300 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4301 info->callbacks->einfo (_("%s duplicated in %s\n"),
4302 bfd_arr[i]->filename,
4303 bfd_arr[i]->my_archive->filename);
4305 info->callbacks->einfo (_("%s duplicated\n"),
4306 bfd_arr[i]->filename);
4312 info->callbacks->einfo (_("sorry, no support for duplicate "
4313 "object files in auto-overlay script\n"));
4314 bfd_set_error (bfd_error_bad_value);
4320 fixed_size += reserved;
4321 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4322 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4324 if (htab->params->ovly_flavour == ovly_soft_icache)
4326 /* Stubs in the non-icache area are bigger. */
4327 fixed_size += htab->non_ovly_stub * 16;
4328 /* Space for icache manager tables.
4329 a) Tag array, one quadword per cache line.
4330 - word 0: ia address of present line, init to zero. */
4331 fixed_size += 16 << htab->num_lines_log2;
4332 /* b) Rewrite "to" list, one quadword per cache line. */
4333 fixed_size += 16 << htab->num_lines_log2;
4334 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4335 to a power-of-two number of full quadwords) per cache line. */
4336 fixed_size += 16 << (htab->fromelem_size_log2
4337 + htab->num_lines_log2);
4338 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4343 /* Guess number of overlays. Assuming overlay buffer is on
4344 average only half full should be conservative. */
4345 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4346 / (htab->local_store - fixed_size));
4347 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4348 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4352 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4353 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4354 "size of 0x%v exceeds local store\n"),
4355 (bfd_vma) fixed_size,
4356 (bfd_vma) mos_param.max_overlay_size);
4358 /* Now see if we should put some functions in the non-overlay area. */
4359 else if (fixed_size < htab->params->auto_overlay_fixed)
4361 unsigned int max_fixed, lib_size;
4363 max_fixed = htab->local_store - mos_param.max_overlay_size;
4364 if (max_fixed > htab->params->auto_overlay_fixed)
4365 max_fixed = htab->params->auto_overlay_fixed;
4366 lib_size = max_fixed - fixed_size;
4367 lib_size = auto_ovl_lib_functions (info, lib_size);
4368 if (lib_size == (unsigned int) -1)
4370 fixed_size = max_fixed - lib_size;
/* Build an array of sections, suitably sorted to place into overlays.  */
4375 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4376 if (ovly_sections == NULL)
4378 ovly_p = ovly_sections;
4379 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4381 count = (size_t) (ovly_p - ovly_sections) / 2;
4382 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4383 if (ovly_map == NULL)
4386 memset (&dummy_caller, 0, sizeof (dummy_caller));
4387 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4388 if (htab->params->line_size != 0)
4389 overlay_size = htab->params->line_size;
4392 while (base < count)
4394 unsigned int size = 0, rosize = 0, roalign = 0;
4396 for (i = base; i < count; i++)
4398 asection *sec, *rosec;
4399 unsigned int tmp, rotmp;
4400 unsigned int num_stubs;
4401 struct call_info *call, *pasty;
4402 struct _spu_elf_section_data *sec_data;
4403 struct spu_elf_stack_info *sinfo;
4406 /* See whether we can add this section to the current
4407 overlay without overflowing our overlay buffer. */
4408 sec = ovly_sections[2 * i];
4409 tmp = align_power (size, sec->alignment_power) + sec->size;
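/* Illustrative note: align_power rounds its first argument up to a
1 << alignment_power boundary, e.g. align_power (0x12, 4) == 0x20,
so tmp is the end of this section if appended to the sections
already placed in the overlay.  */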
4411 rosec = ovly_sections[2 * i + 1];
4414 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4415 if (roalign < rosec->alignment_power)
4416 roalign = rosec->alignment_power;
4418 if (align_power (tmp, roalign) + rotmp > overlay_size)
4420 if (sec->segment_mark)
/* Pasted sections must stay together, so add their sizes too.  */
4424 struct call_info *pasty = find_pasted_call (sec);
4425 while (pasty != NULL)
4427 struct function_info *call_fun = pasty->fun;
4428 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4429 + call_fun->sec->size);
4430 if (call_fun->rodata)
4432 rotmp = (align_power (rotmp,
4433 call_fun->rodata->alignment_power)
4434 + call_fun->rodata->size);
4435 if (roalign < rosec->alignment_power)
4436 roalign = rosec->alignment_power;
4438 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4439 if (pasty->is_pasted)
4443 if (align_power (tmp, roalign) + rotmp > overlay_size)
4446 /* If we add this section, we might need new overlay call
stubs.  Add any overlay section calls to dummy_caller.  */
4449 sec_data = spu_elf_section_data (sec);
4450 sinfo = sec_data->u.i.stack_info;
4451 for (k = 0; k < sinfo->num_fun; ++k)
4452 for (call = sinfo->fun[k].call_list; call; call = call->next)
4453 if (call->is_pasted)
4455 BFD_ASSERT (pasty == NULL);
4458 else if (call->fun->sec->linker_mark)
4460 if (!copy_callee (&dummy_caller, call))
4463 while (pasty != NULL)
4465 struct function_info *call_fun = pasty->fun;
4467 for (call = call_fun->call_list; call; call = call->next)
4468 if (call->is_pasted)
4470 BFD_ASSERT (pasty == NULL);
4473 else if (!copy_callee (&dummy_caller, call))
4477 /* Calculate call stub size. */
4479 for (call = dummy_caller.call_list; call; call = call->next)
4482 unsigned int stub_delta = 1;
4484 if (htab->params->ovly_flavour == ovly_soft_icache)
4485 stub_delta = call->count;
4486 num_stubs += stub_delta;
/* If the call is within this overlay, we won't need a call stub.  */
4490 for (k = base; k < i + 1; k++)
4491 if (call->fun->sec == ovly_sections[2 * k])
4493 num_stubs -= stub_delta;
4497 if (htab->params->ovly_flavour == ovly_soft_icache
4498 && num_stubs > htab->params->max_branch)
4500 if (align_power (tmp, roalign) + rotmp
4501 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4509 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4510 ovly_sections[2 * i]->owner,
4511 ovly_sections[2 * i],
4512 ovly_sections[2 * i + 1] ? " + rodata" : "");
4513 bfd_set_error (bfd_error_bad_value);
4517 while (dummy_caller.call_list != NULL)
4519 struct call_info *call = dummy_caller.call_list;
4520 dummy_caller.call_list = call->next;
4526 ovly_map[base++] = ovlynum;
4529 script = htab->params->spu_elf_open_overlay_script ();
4531 if (htab->params->ovly_flavour == ovly_soft_icache)
4533 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4536 if (fprintf (script,
4537 " . = ALIGN (%u);\n"
4538 " .ovl.init : { *(.ovl.init) }\n"
4539 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4540 htab->params->line_size) <= 0)
4545 while (base < count)
4547 unsigned int indx = ovlynum - 1;
4548 unsigned int vma, lma;
4550 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4551 lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
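/* Worked example (assuming 32 cache lines of 1 KiB, i.e.
num_lines_log2 == 5 and line_size_log2 == 10): overlay 33 has
indx 32, giving vma == 0 (it reuses line 0) and
lma == 0 + (2 << 18) == 0x80000, so overlays sharing a line get
distinct load addresses.  */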
4553 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4554 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4555 ovlynum, vma, lma) <= 0)
4558 base = print_one_overlay_section (script, base, count, ovlynum,
4559 ovly_map, ovly_sections, info);
4560 if (base == (unsigned) -1)
4563 if (fprintf (script, " }\n") <= 0)
4569 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4570 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4573 if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4578 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4581 if (fprintf (script,
4582 " . = ALIGN (16);\n"
4583 " .ovl.init : { *(.ovl.init) }\n"
4584 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4587 for (region = 1; region <= htab->params->num_lines; region++)
4591 while (base < count && ovly_map[base] < ovlynum)
4599 /* We need to set lma since we are overlaying .ovl.init. */
4600 if (fprintf (script,
4601 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4606 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4610 while (base < count)
4612 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4615 base = print_one_overlay_section (script, base, count, ovlynum,
4616 ovly_map, ovly_sections, info);
4617 if (base == (unsigned) -1)
4620 if (fprintf (script, " }\n") <= 0)
4623 ovlynum += htab->params->num_lines;
4624 while (base < count && ovly_map[base] < ovlynum)
4628 if (fprintf (script, " }\n") <= 0)
4632 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4637 free (ovly_sections);
4639 if (fclose (script) != 0)
4642 if (htab->params->auto_overlay & AUTO_RELINK)
4643 (*htab->params->spu_elf_relink) ();
4648 bfd_set_error (bfd_error_system_call);
4650 info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
4654 /* Provide an estimate of total stack required. */
4657 spu_elf_stack_analysis (struct bfd_link_info *info)
4659 struct spu_link_hash_table *htab;
4660 struct _sum_stack_param sum_stack_param;
4662 if (!discover_functions (info))
4665 if (!build_call_tree (info))
4668 htab = spu_hash_table (info);
4669 if (htab->params->stack_analysis)
4671 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4672 info->callbacks->minfo (_("\nStack size for functions. "
4673 "Annotations: '*' max stack, 't' tail call\n"));
4676 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4677 sum_stack_param.overall_stack = 0;
4678 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4681 if (htab->params->stack_analysis)
4682 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4683 (bfd_vma) sum_stack_param.overall_stack);
4687 /* Perform a final link. */
4690 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4692 struct spu_link_hash_table *htab = spu_hash_table (info);
4694 if (htab->params->auto_overlay)
4695 spu_elf_auto_overlay (info);
4697 if ((htab->params->stack_analysis
4698 || (htab->params->ovly_flavour == ovly_soft_icache
4699 && htab->params->lrlive_analysis))
4700 && !spu_elf_stack_analysis (info))
4701 info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4703 if (!spu_elf_build_stubs (info))
4704 info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4706 return bfd_elf_final_link (output_bfd, info);
4709 /* Called when not normally emitting relocs, ie. !info->relocatable
4710 and !info->emitrelocations. Returns a count of special relocs
4711 that need to be emitted. */
4714 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4716 Elf_Internal_Rela *relocs;
4717 unsigned int count = 0;
4719 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4723 Elf_Internal_Rela *rel;
4724 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4726 for (rel = relocs; rel < relend; rel++)
4728 int r_type = ELF32_R_TYPE (rel->r_info);
4729 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4733 if (elf_section_data (sec)->relocs != relocs)
/* Functions for adding fixup records to .fixup.  */
4742 #define FIXUP_RECORD_SIZE 4
4744 #define FIXUP_PUT(output_bfd,htab,index,addr) \
4745 bfd_put_32 (output_bfd, addr, \
4746 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4747 #define FIXUP_GET(output_bfd,htab,index) \
4748 bfd_get_32 (output_bfd, \
4749 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4751 /* Store OFFSET in .fixup. This assumes it will be called with an
increasing OFFSET.  When this OFFSET falls in the quadword covered
by the last fixup record, it just sets a bit in that record,
otherwise it adds a new fixup record.  */
4755 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4758 struct spu_link_hash_table *htab = spu_hash_table (info);
4759 asection *sfixup = htab->sfixup;
4760 bfd_vma qaddr = offset & ~(bfd_vma) 15;
4761 bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4762 if (sfixup->reloc_count == 0)
4764 FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4765 sfixup->reloc_count++;
4769 bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4770 if (qaddr != (base & ~(bfd_vma) 15))
4772 if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4773 (*_bfd_error_handler) (_("fatal error while creating .fixup"));
4774 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4775 sfixup->reloc_count++;
4778 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
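/* Worked example of the encoding above: for OFFSET 0x1234, qaddr is
0x1230 and the word index within the quadword is
(0x1234 & 15) >> 2 == 1, so bit == 8 >> 1 == 4 and the record
stored is 0x1230 | 4 == 0x1234.  A later fixup at 0x123c in the
same quadword just ORs in bit 1, giving 0x1235 (illustrative
arithmetic).  */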
4782 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4785 spu_elf_relocate_section (bfd *output_bfd,
4786 struct bfd_link_info *info,
4788 asection *input_section,
4790 Elf_Internal_Rela *relocs,
4791 Elf_Internal_Sym *local_syms,
4792 asection **local_sections)
4794 Elf_Internal_Shdr *symtab_hdr;
4795 struct elf_link_hash_entry **sym_hashes;
4796 Elf_Internal_Rela *rel, *relend;
4797 struct spu_link_hash_table *htab;
4800 bfd_boolean emit_these_relocs = FALSE;
4801 bfd_boolean is_ea_sym;
4803 unsigned int iovl = 0;
4805 htab = spu_hash_table (info);
4806 stubs = (htab->stub_sec != NULL
4807 && maybe_needs_stubs (input_section));
4808 iovl = overlay_index (input_section);
4809 ea = bfd_get_section_by_name (output_bfd, "._ea");
4810 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4811 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4814 relend = relocs + input_section->reloc_count;
4815 for (; rel < relend; rel++)
4818 reloc_howto_type *howto;
4819 unsigned int r_symndx;
4820 Elf_Internal_Sym *sym;
4822 struct elf_link_hash_entry *h;
4823 const char *sym_name;
4826 bfd_reloc_status_type r;
4827 bfd_boolean unresolved_reloc;
4829 enum _stub_type stub_type;
4831 r_symndx = ELF32_R_SYM (rel->r_info);
4832 r_type = ELF32_R_TYPE (rel->r_info);
4833 howto = elf_howto_table + r_type;
4834 unresolved_reloc = FALSE;
4839 if (r_symndx < symtab_hdr->sh_info)
4841 sym = local_syms + r_symndx;
4842 sec = local_sections[r_symndx];
4843 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4844 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4848 if (sym_hashes == NULL)
4851 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4853 while (h->root.type == bfd_link_hash_indirect
4854 || h->root.type == bfd_link_hash_warning)
4855 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4858 if (h->root.type == bfd_link_hash_defined
4859 || h->root.type == bfd_link_hash_defweak)
4861 sec = h->root.u.def.section;
4863 || sec->output_section == NULL)
4864 /* Set a flag that will be cleared later if we find a
4865 relocation value for this symbol. output_section
is typically NULL for symbols satisfied by a shared library.  */
4868 unresolved_reloc = TRUE;
4870 relocation = (h->root.u.def.value
4871 + sec->output_section->vma
4872 + sec->output_offset);
4874 else if (h->root.type == bfd_link_hash_undefweak)
4876 else if (info->unresolved_syms_in_objects == RM_IGNORE
4877 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4879 else if (!info->relocatable
4880 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4883 err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4884 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4885 if (!info->callbacks->undefined_symbol (info,
4886 h->root.root.string,
4889 rel->r_offset, err))
4893 sym_name = h->root.root.string;
4896 if (sec != NULL && elf_discarded_section (sec))
4898 /* For relocs against symbols from removed linkonce sections,
4899 or sections discarded by a linker script, we just want the
4900 section contents zeroed. Avoid any special processing. */
4901 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
4907 if (info->relocatable)
4910 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4911 if (r_type == R_SPU_ADD_PIC
4913 && !(h->def_regular || ELF_COMMON_DEF_P (h)))
4915 bfd_byte *loc = contents + rel->r_offset;
4921 is_ea_sym = (ea != NULL
4923 && sec->output_section == ea);
4925 /* If this symbol is in an overlay area, we may need to relocate
4926 to the overlay stub. */
4927 addend = rel->r_addend;
4930 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4931 contents, info)) != no_stub)
4933 unsigned int ovl = 0;
4934 struct got_entry *g, **head;
4936 if (stub_type != nonovl_stub)
4940 head = &h->got.glist;
4942 head = elf_local_got_ents (input_bfd) + r_symndx;
	  for (g = *head; g != NULL; g = g->next)
	    if (htab->params->ovly_flavour == ovly_soft_icache
		? (g->ovl == ovl
		   && g->br_addr == (rel->r_offset
				     + input_section->output_offset
				     + input_section->output_section->vma))
		: g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    abort ();

	  relocation = g->stub_addr;
	  addend = 0;
	}
      else
	{
	  /* For soft icache, encode the overlay index into addresses.  */
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && (r_type == R_SPU_ADDR16_HI
		  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
	      && !is_ea_sym)
	    {
	      unsigned int ovl = overlay_index (sec);

	      if (ovl != 0)
		{
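		  /* Illustrative numbers only: with 32 cache lines
		     (num_lines_log2 == 5, a hypothetical value),
		     overlays 1..32 get set_id 1 and overlays 33..64
		     get set_id 2, placed above the 18-bit local-store
		     address bits.  */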
		  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
		  relocation += set_id << 18;
		}
	    }
	}
      if (htab->params->emit_fixups && !info->relocatable
	  && (input_section->flags & SEC_ALLOC) != 0
	  && r_type == R_SPU_ADDR32)
	{
	  bfd_vma offset;
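	  /* Record the address of this ADDR32 word in the fixup table;
	     spu_elf_size_sections below sizes that table by counting
	     these relocs a quadword at a time.  */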
	  offset = rel->r_offset + input_section->output_section->vma
		   + input_section->output_offset;
	  spu_elf_emit_fixup (output_bfd, info, offset);
	}
      if (unresolved_reloc)
	;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  emit_these_relocs = TRUE;
	  continue;
	}
      else if (is_ea_sym)
	unresolved_reloc = TRUE;
      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* Fall through.  */

	    common_error:
	      ret = FALSE;
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }
  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      /* Keep only the R_SPU_PPU* relocs for output.  */
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static bfd_boolean
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;
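      /* Pick the stub usable from outside any overlay: for soft-icache
	 the entry whose recorded branch address is the stub itself
	 (i.e. no real branch site), otherwise the entry with zero
	 addend and zero overlay.  */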
      for (g = h->got.glist; g != NULL; g = g->next)
	if (htab->params->ovly_flavour == ovly_soft_icache
	    ? g->br_addr == g->stub_addr
	    : g->addend == 0 && g->ovl == 0)
	  {
	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
			     (htab->stub_sec[0]->output_section->owner,
			      htab->stub_sec[0]->output_section));
	    sym->st_value = g->stub_addr;
	    break;
	  }
    }

  return TRUE;
}
static int spu_plugin = 0;

/* Set or clear plugin mode.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}

/* Set ELF header e_type for plugins.  */

static void
spu_elf_post_process_headers (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
}
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */
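/* For instance, an image with four overlay sections and a loadable
   .toe section reports 4 + 1 + 1 = 6 extra program headers: one per
   overlay, one more whenever any overlays exist, and one for .toe.
   This is purely a worked example of the counting below.  */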
static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  int extra = 0;
  asection *sec;

  if (info != NULL)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      extra = htab->num_overlays;
    }

  if (extra)
    ++extra;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */
static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		/* Split the sections following S off into a new
		   PT_LOAD segment.  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		/* Put S in a segment of its own, leaving the sections
		   before it where they are.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }
  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */
  /* Move all overlay segments onto a separate list.  */
  p = &elf_tdata (abfd)->segment_map;
  p_overlay = &m_overlay;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
	  && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	{
	  struct elf_segment_map *m = *p;
	  *p = m->next;
	  *p_overlay = m;
	  p_overlay = &m->next;
	  continue;
	}

      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
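  /* Note that if no overlay segments were found, m_overlay simply
     ends up pointing at the original list head and the segment map
     is left unchanged.  */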
  *p_overlay = elf_tdata (abfd)->segment_map;
  elf_tdata (abfd)->segment_map = m_overlay;

  return TRUE;
}
/* Tweak the section type of .note.spu_name.  */

static bfd_boolean
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
		       Elf_Internal_Shdr *hdr,
		       asection *sec)
{
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return TRUE;
}
/* Tweak phdrs before writing them out.  */

static bfd_boolean
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;
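	    /* Sketch of the _ovly_table entry layout assumed by the
	       offset calculation below, 16 bytes per overlay with
	       index 0 reserved:
		 struct ovly_entry { u32 vma; u32 size;
				     u32 file_off; u32 buf; };
	       so overlay O's file_off field sits at O * 16 + 8.  */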
	    if (htab->ovtab != NULL && htab->ovtab->size != 0
		&& htab->params->ovly_flavour != ovly_soft_icache)
	      {
		bfd_byte *p = htab->ovtab->contents;
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
      /* Soft-icache has its file offset put in .ovl.init.  */
      if (htab->init != NULL && htab->init->size != 0)
	{
	  bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	  bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	}
    }
  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
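  /* The rounding arithmetic below: -x & 15 yields the amount needed
     to round x up to a multiple of 16.  For example, p_filesz 0x29
     gives -0x29 & 15 == 7, and 0x29 + 7 == 0x30.  */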
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* If the loop above ran to completion, no rounded-up segment would
     overlap the next one, so it is safe to apply the adjustments.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
bfd_boolean
spu_elf_size_sections (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (htab->params->emit_fixups)
    {
      asection *sfixup = htab->sfixup;
      int fixup_count = 0;
      bfd *ibfd;
      size_t size;
      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
	{
	  asection *isec;

	  if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
	      bfd_vma base_end;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((isec->flags & SEC_RELOC) == 0
		  || isec->reloc_count == 0)
		continue;

	      /* Get the relocs.  */
	      internal_relocs =
		_bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
					   info->keep_memory);
	      if (internal_relocs == NULL)
		return FALSE;
	      /* One quadword can contain up to four R_SPU_ADDR32
		 relocations.  They are stored in a single word by
		 saving the upper 28 bits of the address and setting the
		 lower 4 bits to a bit mask of the words that have the
		 relocation.  BASE_END keeps track of the next quadword.  */
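	      /* Worked example with made-up offsets: ADDR32 relocs at
		 0x100, 0x104 and 0x10c all fall in the quadword at
		 0x100 and cost one fixup record; a further reloc at
		 0x110 starts a new quadword and a second record.  */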
	      irela = internal_relocs;
	      irelaend = irela + isec->reloc_count;
	      base_end = 0;
	      for (; irela < irelaend; irela++)
		if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
		    && irela->r_offset >= base_end)
		  {
		    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
		    fixup_count++;
		  }
	    }
	}
      /* We always have a NULL fixup as a sentinel.  */
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
      if (!bfd_set_section_size (output_bfd, sfixup, size))
	return FALSE;
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
      if (sfixup->contents == NULL)
	return FALSE;
    }

  return TRUE;
}
#define TARGET_BIG_SYM		bfd_elf32_spu_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define elf_backend_object_p			spu_elf_object_p
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
#define elf_backend_post_process_headers	spu_elf_post_process_headers
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

#include "elf32-target.h"