1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
27 #include "elf32-spu.h"
29 /* We use RELA style relocs. Don't define USE_REL. */
31 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
35 /* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
/* Reloc HOWTO table for the SPU target.  Indexed directly by
   'enum elf_spu_reloc_type', so entry order must match that enum.
   Fields per HOWTO: type, rightshift, size, bitsize, pc_relative,
   bitpos, overflow check, special_function, name, partial_inplace,
   src_mask, dst_mask, pcrel_offset.
   NOTE(review): this dump appears truncated — the final line of the
   R_SPU_PPU64 entry and the closing "};" are not visible here.  */
38 static reloc_howto_type elf_howto_table[] = {
39 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
40 bfd_elf_generic_reloc, "SPU_NONE",
41 FALSE, 0, 0x00000000, FALSE),
42 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
43 bfd_elf_generic_reloc, "SPU_ADDR10",
44 FALSE, 0, 0x00ffc000, FALSE),
45 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
46 bfd_elf_generic_reloc, "SPU_ADDR16",
47 FALSE, 0, 0x007fff80, FALSE),
48 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
49 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
50 FALSE, 0, 0x007fff80, FALSE),
51 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
52 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
53 FALSE, 0, 0x007fff80, FALSE),
54 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
55 bfd_elf_generic_reloc, "SPU_ADDR18",
56 FALSE, 0, 0x01ffff80, FALSE),
57 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "SPU_ADDR32",
59 FALSE, 0, 0xffffffff, FALSE),
60 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "SPU_REL16",
62 FALSE, 0, 0x007fff80, TRUE),
63 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
64 bfd_elf_generic_reloc, "SPU_ADDR7",
65 FALSE, 0, 0x001fc000, FALSE),
/* The two 9-bit pc-relative relocs have split fields, so they use the
   custom spu_elf_rel9 special function rather than the generic one.  */
66 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
67 spu_elf_rel9, "SPU_REL9",
68 FALSE, 0, 0x0180007f, TRUE),
69 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
70 spu_elf_rel9, "SPU_REL9I",
71 FALSE, 0, 0x0000c07f, TRUE),
72 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
73 bfd_elf_generic_reloc, "SPU_ADDR10I",
74 FALSE, 0, 0x00ffc000, FALSE),
75 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
76 bfd_elf_generic_reloc, "SPU_ADDR16I",
77 FALSE, 0, 0x007fff80, FALSE),
78 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
79 bfd_elf_generic_reloc, "SPU_REL32",
80 FALSE, 0, 0xffffffff, TRUE),
81 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "SPU_ADDR16X",
83 FALSE, 0, 0x007fff80, FALSE),
84 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
85 bfd_elf_generic_reloc, "SPU_PPU32",
86 FALSE, 0, 0xffffffff, FALSE),
87 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
88 bfd_elf_generic_reloc, "SPU_PPU64",
/* SPU-specific special sections: .toe (table of effective addresses)
   is a 16-byte-aligned, allocated NOBITS section.
   NOTE(review): the table's terminating entry and closing "};" are not
   visible in this dump.  */
92 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
93 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
/* Map a generic BFD reloc code onto the corresponding SPU ELF reloc
   type (an index into elf_howto_table).  Returns R_SPU_NONE for codes
   with no SPU equivalent (return statements for several cases fall in
   lines not visible in this dump — presumably each unpaired "case"
   returns its matching R_SPU_* value; verify against full source).  */
97 static enum elf_spu_reloc_type
98 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
104 case BFD_RELOC_SPU_IMM10W:
106 case BFD_RELOC_SPU_IMM16W:
108 case BFD_RELOC_SPU_LO16:
109 return R_SPU_ADDR16_LO;
110 case BFD_RELOC_SPU_HI16:
111 return R_SPU_ADDR16_HI;
112 case BFD_RELOC_SPU_IMM18:
114 case BFD_RELOC_SPU_PCREL16:
116 case BFD_RELOC_SPU_IMM7:
118 case BFD_RELOC_SPU_IMM8:
120 case BFD_RELOC_SPU_PCREL9a:
122 case BFD_RELOC_SPU_PCREL9b:
124 case BFD_RELOC_SPU_IMM10:
125 return R_SPU_ADDR10I;
126 case BFD_RELOC_SPU_IMM16:
127 return R_SPU_ADDR16I;
130 case BFD_RELOC_32_PCREL:
132 case BFD_RELOC_SPU_PPU32:
134 case BFD_RELOC_SPU_PPU64:
/* elf_info_to_howto hook: fill in CACHE_PTR->howto from the r_type
   field of the internal reloc DST.  Asserts the type is in range of
   elf_howto_table.  */
140 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
142 Elf_Internal_Rela *dst)
144 enum elf_spu_reloc_type r_type;
146 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
147 BFD_ASSERT (r_type < R_SPU_max);
148 cache_ptr->howto = &elf_howto_table[(int) r_type];
/* bfd_reloc_type_lookup hook: translate a BFD reloc CODE to a HOWTO
   pointer via spu_elf_bfd_to_reloc_type.  R_SPU_NONE indicates an
   unsupported code (the failure return between lines 157 and 160 is
   not visible in this dump — presumably NULL).  */
151 static reloc_howto_type *
152 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
153 bfd_reloc_code_real_type code)
155 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
157 if (r_type == R_SPU_NONE)
160 return elf_howto_table + r_type;
/* bfd_reloc_name_lookup hook: case-insensitive linear search of
   elf_howto_table by reloc name (e.g. "SPU_ADDR16").  */
163 static reloc_howto_type *
164 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
169 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
170 if (elf_howto_table[i].name != NULL
171 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
172 return &elf_howto_table[i];
177 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
/* Special reloc function for R_SPU_REL9 and R_SPU_REL9I.  The 9-bit
   pc-relative value is split across non-contiguous instruction fields,
   so the generic machinery can't apply it; this computes the final
   value, range-checks it to [-256, 255] words, scatters the two high
   bits into both possible field positions, and patches the insn.  The
   howto's dst_mask selects whichever field layout applies.  */
179 static bfd_reloc_status_type
180 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
181 void *data, asection *input_section,
182 bfd *output_bfd, char **error_message)
184 bfd_size_type octets;
188 /* If this is a relocatable link (output_bfd test tells us), just
189 call the generic function. Any adjustment will be done at final
191 if (output_bfd != NULL)
192 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
193 input_section, output_bfd, error_message);
195 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
196 return bfd_reloc_outofrange;
197 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
199 /* Get symbol value. */
201 if (!bfd_is_com_section (symbol->section))
203 if (symbol->section->output_section)
204 val += symbol->section->output_section->vma;
206 val += reloc_entry->addend;
208 /* Make it pc-relative. */
209 val -= input_section->output_section->vma + input_section->output_offset;
/* Signed 9-bit (word) range check done with unsigned arithmetic.  */
212 if (val + 256 >= 512)
213 return bfd_reloc_overflow;
215 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
217 /* Move two high bits of value to REL9I and REL9 position.
218 The mask will take care of selecting the right field. */
219 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
220 insn &= ~reloc_entry->howto->dst_mask;
221 insn |= val & reloc_entry->howto->dst_mask;
222 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
/* bfd_new_section_hook: attach zeroed SPU-specific per-section data
   (struct _spu_elf_section_data) before chaining to the generic ELF
   hook.  The NULL check on the bfd_zalloc result presumably sits on a
   line not visible in this dump.  */
227 spu_elf_new_section_hook (bfd *abfd, asection *sec)
229 if (!sec->used_by_bfd)
231 struct _spu_elf_section_data *sdata;
233 sdata = bfd_zalloc (abfd, sizeof (*sdata));
236 sec->used_by_bfd = sdata;
239 return _bfd_elf_new_section_hook (abfd, sec);
242 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
243 strip --strip-unneeded will not remove them. */
/* Backend symbol-processing hook: tag non-absolute symbols whose names
   start with "_EAR_" with BSF_KEEP so "strip --strip-unneeded" keeps
   them (see comment above this function in the original source).  */
246 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
248 if (sym->name != NULL
249 && sym->section != bfd_abs_section_ptr
250 && strncmp (sym->name, "_EAR_", 5) == 0)
251 sym->flags |= BSF_KEEP;
254 /* SPU ELF linker hash table. */
/* SPU-specific link hash table, extending the generic ELF one with
   overlay bookkeeping (sections, stub counts, overlay-manager symbols,
   and option flags stashed by spu_elf_create_sections).  Several field
   declarations (ovl_sec, stub_sec, ovtab, toe, etc.) fall on lines not
   visible in this dump.  */
256 struct spu_link_hash_table
258 struct elf_link_hash_table elf;
260 /* Shortcuts to overlay sections. */
265 /* Count of stubs in each overlay section. */
266 unsigned int *stub_count;
268 /* The stub section for each overlay section. */
/* Cached overlay-manager entry points resolved in spu_elf_build_stubs.  */
271 struct elf_link_hash_entry *ovly_load;
272 struct elf_link_hash_entry *ovly_return;
273 unsigned long ovly_load_r_symndx;
275 /* Number of overlay buffers. */
276 unsigned int num_buf;
278 /* Total number of overlays. */
279 unsigned int num_overlays;
281 /* Set if we should emit symbols for stubs. */
282 unsigned int emit_stub_syms:1;
284 /* Set if we want stubs on calls out of overlay regions to
285 non-overlay regions. */
286 unsigned int non_overlay_stubs : 1;
/* Set on stub-building errors; checked in spu_elf_build_stubs.  */
289 unsigned int stub_err : 1;
291 /* Set if stack size analysis should be done. */
292 unsigned int stack_analysis : 1;
294 /* Set if __stack_* syms will be emitted. */
295 unsigned int emit_stack_syms : 1;
298 /* Hijack the generic got fields for overlay stub accounting. */
/* struct got_entry: one per (symbol, addend, overlay) stub; chained via
   'next'.  Other fields (ovl, addend, stub_addr) fall on lines not
   visible in this dump.  */
302 struct got_entry *next;
/* Downcast from the generic link info hash to our SPU table.  */
308 #define spu_hash_table(p) \
309 ((struct spu_link_hash_table *) ((p)->hash))
311 /* Create a spu ELF linker hash table. */
/* bfd_link_hash_table_create hook: allocate and initialise the SPU
   link hash table.  After generic ELF init, the SPU-specific tail of
   the struct (from 'ovtab' onward) is zeroed in one memset, and the
   got refcount/offset initialisers are set up for the glist hijack
   used by the stub accounting code.  */
313 static struct bfd_link_hash_table *
314 spu_elf_link_hash_table_create (bfd *abfd)
316 struct spu_link_hash_table *htab;
318 htab = bfd_malloc (sizeof (*htab));
322 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
323 _bfd_elf_link_hash_newfunc,
324 sizeof (struct elf_link_hash_entry)))
330 memset (&htab->ovtab, 0,
331 sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab))
/* Stubs use the got glist fields, so ensure they start out NULL.  */
333 htab->elf.init_got_refcount.refcount = 0;
334 htab->elf.init_got_refcount.glist = NULL;
335 htab->elf.init_got_offset.offset = 0;
336 htab->elf.init_got_offset.glist = NULL;
337 return &htab->elf.root;
340 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
341 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
342 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
/* Resolve R_SYMNDX in IBFD: sets (*HP, *SYMP) to (hash, NULL) for a
   global symbol or (NULL, sym) for a local, and *SYMSECP to the
   symbol's section.  *LOCSYMSP caches the local symbol array between
   calls; when it aliases symtab_hdr->contents the globals are read and
   cached too, for later stack analysis.  */
345 get_sym_h (struct elf_link_hash_entry **hp,
346 Elf_Internal_Sym **symp,
348 Elf_Internal_Sym **locsymsp,
349 unsigned long r_symndx,
352 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
/* Indices >= sh_info are global symbols, looked up via sym_hashes.  */
354 if (r_symndx >= symtab_hdr->sh_info)
356 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
357 struct elf_link_hash_entry *h;
359 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
/* Follow indirect and warning links to the real symbol.  */
360 while (h->root.type == bfd_link_hash_indirect
361 || h->root.type == bfd_link_hash_warning)
362 h = (struct elf_link_hash_entry *) h->root.u.i.link;
372 asection *symsec = NULL;
373 if (h->root.type == bfd_link_hash_defined
374 || h->root.type == bfd_link_hash_defweak)
375 symsec = h->root.u.def.section;
/* Local symbol: read (or reuse cached) local syms.  */
381 Elf_Internal_Sym *sym;
382 Elf_Internal_Sym *locsyms = *locsymsp;
386 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
389 size_t symcount = symtab_hdr->sh_info;
391 /* If we are reading symbols into the contents, then
392 read the global syms too. This is done to cache
393 syms for later stack analysis. */
394 if ((unsigned char **) locsymsp == &symtab_hdr->contents)
395 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
396 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
403 sym = locsyms + r_symndx;
412 *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
418 /* Create the note section if not already present. This is done early so
419 that the linker maps the sections to the right place in the output. */
/* Create the SPU_PTNOTE_SPUNAME note section early (so the linker maps
   it correctly) unless some input bfd already provides one, and stash
   the stack_analysis / emit_stack_syms options in the hash table.  The
   note payload is: namesz, descsz, type=1, name (SPU_PLUGIN_NAME),
   desc (output filename), with name/desc padded to 4 bytes.  */
422 spu_elf_create_sections (bfd *output_bfd,
423 struct bfd_link_info *info,
428 struct spu_link_hash_table *htab = spu_hash_table (info);
430 /* Stash some options away where we can get at them later. */
431 htab->stack_analysis = stack_analysis;
432 htab->emit_stack_syms = emit_stack_syms;
/* If any input already has the note section, nothing more to do.  */
434 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
435 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
440 /* Make SPU_PTNOTE_SPUNAME section. */
447 ibfd = info->input_bfds;
448 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
449 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
451 || !bfd_set_section_alignment (ibfd, s, 4))
454 name_len = strlen (bfd_get_filename (output_bfd)) + 1;
/* 12 bytes of note header + padded name + padded desc.  */
455 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
456 size += (name_len + 3) & -4;
458 if (!bfd_set_section_size (ibfd, s, size))
461 data = bfd_zalloc (ibfd, size);
465 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
466 bfd_put_32 (ibfd, name_len, data + 4);
467 bfd_put_32 (ibfd, 1, data + 8);
468 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
469 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
470 bfd_get_filename (output_bfd), name_len);
477 /* qsort predicate to sort sections by vma. */
/* qsort comparator: order asection pointers by vma, falling back to
   section index for a stable order when vmas are equal.  */
480 sort_sections (const void *a, const void *b)
482 const asection *const *s1 = a;
483 const asection *const *s2 = b;
484 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
487 return delta < 0 ? -1 : 1;
489 return (*s1)->index - (*s2)->index;
492 /* Identify overlays in the output bfd, and number them. */
/* Identify overlay sections in the output bfd: collect allocated
   sections, sort by vma, and treat any vma overlap as an overlay.
   Numbers overlays (ovl_index, 1-based) and overlay buffers (ovl_buf)
   in each section's SPU section data; stores totals and the sorted
   section array in the hash table.  Returns nonzero iff any overlays
   were found.  */
495 spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
497 struct spu_link_hash_table *htab = spu_hash_table (info);
498 asection **alloc_sec;
499 unsigned int i, n, ovl_index, num_buf;
503 if (output_bfd->section_count < 2)
506 alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
507 if (alloc_sec == NULL)
510 /* Pick out all the alloced sections. */
511 for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
512 if ((s->flags & SEC_ALLOC) != 0
513 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
523 /* Sort them by vma. */
524 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections)
526 /* Look for overlapping vmas. Any with overlap must be overlays.
527 Count them. Also count the number of overlay regions. */
528 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
529 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
532 if (s->vma < ovl_end)
534 asection *s0 = alloc_sec[i - 1];
/* First overlap with s0: s0 itself becomes the first overlay of a
   new buffer.  */
536 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
538 alloc_sec[ovl_index] = s0;
539 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
540 spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
542 alloc_sec[ovl_index] = s;
543 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
544 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
/* Overlays sharing a buffer must all start at the buffer's vma.  */
545 if (s0->vma != s->vma)
547 info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
548 "do not start at the same address.\n"),
552 if (ovl_end < s->vma + s->size)
553 ovl_end = s->vma + s->size;
556 ovl_end = s->vma + s->size;
559 htab->num_overlays = ovl_index;
560 htab->num_buf = num_buf;
561 htab->ovl_sec = alloc_sec;
562 return ovl_index != 0;
565 /* Support two sizes of overlay stubs, a slower more compact stub of two
566 intructions, and a faster stub of four instructions. */
567 #ifndef OVL_STUB_SIZE
568 /* Default to faster. */
569 #define OVL_STUB_SIZE 16
570 /* #define OVL_STUB_SIZE 8 */
/* SPU instruction opcode templates used when emitting overlay stubs
   (register/immediate fields are OR'd in by build_stub).  */
572 #define BRSL 0x33000000
573 #define BR 0x32000000
574 #define NOP 0x40200000
575 #define LNOP 0x00200000
576 #define ILA 0x42000000
578 /* Return true for all relative and absolute branch instructions.
586 brhnz 00100011 0.. */
/* True iff the 4-byte big-endian INSN is a relative or absolute branch
   (br/bra/brsl/brasl/brnz/brz/brhnz/brhz family — see the opcode
   pattern comment above).  */
589 is_branch (const unsigned char *insn)
591 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
594 /* Return true for all indirect branch instructions.
602 bihnz 00100101 011 */
/* True iff INSN is an indirect branch (bi/bisl/iret/bisled/biz/binz/
   bihz/bihnz family — see the opcode pattern comment above).  */
605 is_indirect_branch (const unsigned char *insn)
607 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
610 /* Return true for branch hint instructions.
/* True iff INSN is a branch hint (hbr family).  */
615 is_hint (const unsigned char *insn)
617 return (insn[0] & 0xfc) == 0x10;
620 /* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
/* Decide whether a reference to SYM_NAME (defined in SYM_SEC) from
   INPUT_SECTION must go through an overlay stub.  Returns FALSE when
   there are no overlays or the target section data is unusable; TRUE
   for setjmp*, for cross-overlay references, and (presumably, in lines
   not visible here) for non-branch references that take a function's
   address.  */
623 needs_ovl_stub (const char *sym_name,
625 asection *input_section,
626 struct spu_link_hash_table *htab,
627 bfd_boolean is_branch)
629 if (htab->num_overlays == 0)
633 || sym_sec->output_section == NULL
634 || spu_elf_section_data (sym_sec->output_section) == NULL)
637 /* setjmp always goes via an overlay stub, because then the return
638 and hence the longjmp goes via __ovly_return. That magically
639 makes setjmp/longjmp between overlays work. */
640 if (strncmp (sym_name, "setjmp", 6) == 0
641 && (sym_name[6] == '\0' || sym_name[6] == '@'))
644 /* Usually, symbols in non-overlay sections don't need stubs. */
645 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
646 && !htab->non_overlay_stubs)
649 /* A reference from some other section to a symbol in an overlay
650 section needs a stub. */
651 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
652 != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
655 /* If this insn isn't a branch then we are possibly taking the
656 address of a function and passing it out somehow. */
/* Classification of the instruction a reloc applies to: plain data or
   address-taking (non_branch), a branch, or a function call.  */
660 enum _insn_type { non_branch, branch, call };
/* First (sizing) pass: record that a stub is needed for the symbol
   H (or the local symbol named by IRELA) referenced from ISEC in IBFD,
   bumping htab->stub_count[ovl].  Branch/call references get one stub
   per function per overlay; address-taken references get a single stub
   in the non-overlay area (ovl == 0), which subsumes and removes any
   per-overlay stubs for the same (symbol, addend).  */
663 count_stub (struct spu_link_hash_table *htab,
666 enum _insn_type insn_type,
667 struct elf_link_hash_entry *h,
668 const Elf_Internal_Rela *irela)
670 unsigned int ovl = 0;
671 struct got_entry *g, **head;
674 /* If this instruction is a branch or call, we need a stub
675 for it. One stub per function per overlay.
676 If it isn't a branch, then we are taking the address of
677 this function so need a stub in the non-overlay area
678 for it. One stub per function. */
679 if (insn_type != non_branch)
680 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
/* Global symbols chain stub entries off the hijacked got glist;
   locals use a lazily-allocated per-bfd array indexed by symndx.  */
683 head = &h->got.glist;
686 if (elf_local_got_ents (ibfd) == NULL)
688 bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
689 * sizeof (*elf_local_got_ents (ibfd)));
690 elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
691 if (elf_local_got_ents (ibfd) == NULL)
694 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
699 addend = irela->r_addend;
703 struct got_entry *gnext;
/* An existing non-overlay (ovl == 0) stub already covers this use.  */
705 for (g = *head; g != NULL; g = g->next)
706 if (g->addend == addend && g->ovl == 0)
711 /* Need a new non-overlay area stub. Zap other stubs. */
712 for (g = *head; g != NULL; g = gnext)
715 if (g->addend == addend)
717 htab->stub_count[g->ovl] -= 1;
725 for (g = *head; g != NULL; g = g->next)
726 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
/* No matching entry: allocate and link a new one, counted below.  */
732 g = bfd_malloc (sizeof *g);
737 g->stub_addr = (bfd_vma) -1;
741 htab->stub_count[ovl] += 1;
747 /* Two instruction overlay stubs look like:
750 .word target_ovl_and_address
752 ovl_and_address is a word with the overlay number in the top 14 bits
753 and local store address in the bottom 18 bits.
755 Four instruction overlay stubs look like:
759 ila $79,target_address
/* Second (emission) pass counterpart of count_stub: find the matching
   got_entry for this reference, and if its stub hasn't been written
   yet, append OVL_STUB_SIZE bytes of stub code to the overlay's .stub
   section.  DEST is the target symbol value; DEST_SEC its section.
   Optionally defines a "%08x.ovl_call.<name>" symbol on the stub when
   emit_stub_syms is set.  */
763 build_stub (struct spu_link_hash_table *htab,
766 enum _insn_type insn_type,
767 struct elf_link_hash_entry *h,
768 const Elf_Internal_Rela *irela,
773 struct got_entry *g, **head;
775 bfd_vma addend, val, from, to;
778 if (insn_type != non_branch)
779 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
782 head = &h->got.glist;
784 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
788 addend = irela->r_addend;
/* Locate the entry created by count_stub; a non-overlay (ovl == 0)
   entry satisfies overlay references too.  */
790 for (g = *head; g != NULL; g = g->next)
791 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
796 if (g->ovl == 0 && ovl != 0)
/* Stub already emitted for this entry.  */
799 if (g->stub_addr != (bfd_vma) -1)
802 sec = htab->stub_sec[ovl];
803 dest += dest_sec->output_offset + dest_sec->output_section->vma;
804 from = sec->size + sec->output_offset + sec->output_section->vma;
/* 'to' is the __ovly_load entry point the stub branches to.  */
806 to = (htab->ovly_load->root.u.def.value
807 + htab->ovly_load->root.u.def.section->output_offset
808 + htab->ovly_load->root.u.def.section->output_section->vma);
/* Range/alignment check for the 4-insn stub's relative branch.  */
810 if (OVL_STUB_SIZE == 16)
812 if (((dest | to | from) & 3) != 0
813 || val + 0x20000 >= 0x40000)
818 ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
820 if (OVL_STUB_SIZE == 16)
/* ila $78,ovl ; lnop ; ila $79,dest ; br __ovly_load  */
822 bfd_put_32 (sec->owner, ILA + ((ovl << 7) & 0x01ffff80) + 78,
823 sec->contents + sec->size);
824 bfd_put_32 (sec->owner, LNOP,
825 sec->contents + sec->size + 4);
826 bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
827 sec->contents + sec->size + 8);
828 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
829 sec->contents + sec->size + 12);
831 else if (OVL_STUB_SIZE == 8)
/* brsl $75,__ovly_load followed by the ovl/address word.  */
833 bfd_put_32 (sec->owner, BRSL + ((val << 5) & 0x007fff80) + 75,
834 sec->contents + sec->size);
836 val = (dest & 0x3ffff) | (ovl << 14);
837 bfd_put_32 (sec->owner, val,
838 sec->contents + sec->size + 4);
842 sec->size += OVL_STUB_SIZE;
844 if (htab->emit_stub_syms)
/* Stub symbol name: "%08x.ovl_call." + symbol name (or "sec:sym"
   for locals) + optional "+addend".  */
850 len = 8 + sizeof (".ovl_call.") - 1;
852 len += strlen (h->root.root.string);
857 add = (int) irela->r_addend & 0xffffffff;
860 name = bfd_malloc (len);
864 sprintf (name, "%08x.ovl_call.", g->ovl);
866 strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
868 sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
869 dest_sec->id & 0xffffffff,
870 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
872 sprintf (name + len - 9, "+%x", add);
874 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
878 if (h->root.type == bfd_link_hash_new)
880 h->root.type = bfd_link_hash_defined;
881 h->root.u.def.section = sec;
882 h->root.u.def.value = sec->size - OVL_STUB_SIZE;
883 h->size = OVL_STUB_SIZE;
887 h->ref_regular_nonweak = 1;
896 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
/* Sizing-pass traversal callback; INF is the SPU hash table.  */
900 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
902 /* Symbols starting with _SPUEAR_ need a stub because they may be
903 invoked by the PPU. */
904 if ((h->root.type == bfd_link_hash_defined
905 || h->root.type == bfd_link_hash_defweak)
907 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
909 struct spu_link_hash_table *htab = inf;
/* non_branch => one stub in the non-overlay area.  */
911 count_stub (htab, NULL, NULL, non_branch, h, NULL);
/* Emission-pass twin of allocate_spuear_stubs: called via
   elf_link_hash_traverse with INF = the SPU hash table, it writes the
   non-overlay stub for each defined _SPUEAR_ symbol.  */
918 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
920 /* Symbols starting with _SPUEAR_ need a stub because they may be
921 invoked by the PPU. */
922 if ((h->root.type == bfd_link_hash_defined
923 || h->root.type == bfd_link_hash_defweak)
925 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
927 struct spu_link_hash_table *htab = inf;
929 build_stub (htab, NULL, NULL, non_branch, h, NULL,
930 h->root.u.def.value, h->root.u.def.section);
936 /* Size or build stubs. */
/* Walk every reloc of every SEC_ALLOC|SEC_LOAD section in every SPU
   input bfd, and for each reference that needs_ovl_stub() either
   count (sizing pass) or build (emission pass) the stub — presumably
   selected by a third parameter whose declaration falls on a line not
   visible in this dump.  Reads the instruction under ADDR16/REL16
   relocs to classify it as branch/call/hint.  */
939 process_stubs (bfd *output_bfd,
940 struct bfd_link_info *info,
943 struct spu_link_hash_table *htab = spu_hash_table (info);
946 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
948 extern const bfd_target bfd_elf32_spu_vec;
949 Elf_Internal_Shdr *symtab_hdr;
951 Elf_Internal_Sym *local_syms = NULL;
/* Skip non-SPU input bfds.  */
954 if (ibfd->xvec != &bfd_elf32_spu_vec)
957 /* We'll need the symbol table in a second. */
958 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
959 if (symtab_hdr->sh_info == 0)
962 /* Arrange to read and keep global syms for later stack analysis. */
964 if (htab->stack_analysis)
965 psyms = &symtab_hdr->contents;
967 /* Walk over each section attached to the input bfd. */
968 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
970 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
972 /* If there aren't any relocs, then there's nothing more to do. */
973 if ((isec->flags & SEC_RELOC) == 0
974 || (isec->flags & SEC_ALLOC) == 0
975 || (isec->flags & SEC_LOAD) == 0
976 || isec->reloc_count == 0)
979 /* If this section is a link-once section that will be
980 discarded, then don't create any stubs. */
981 if (isec->output_section == NULL
982 || isec->output_section->owner != output_bfd)
985 /* Get the relocs. */
986 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
988 if (internal_relocs == NULL)
989 goto error_ret_free_local;
991 /* Now examine each relocation. */
992 irela = internal_relocs;
993 irelaend = irela + isec->reloc_count;
994 for (; irela < irelaend; irela++)
996 enum elf_spu_reloc_type r_type;
999 Elf_Internal_Sym *sym;
1000 struct elf_link_hash_entry *h;
1001 const char *sym_name;
1002 unsigned int sym_type;
1003 enum _insn_type insn_type;
1005 r_type = ELF32_R_TYPE (irela->r_info);
1006 r_indx = ELF32_R_SYM (irela->r_info);
1008 if (r_type >= R_SPU_max)
1010 bfd_set_error (bfd_error_bad_value);
/* Shared error-exit labels: free relocs and/or cached local syms.  */
1011 error_ret_free_internal:
1012 if (elf_section_data (isec)->relocs != internal_relocs)
1013 free (internal_relocs);
1014 error_ret_free_local:
1015 if (local_syms != NULL
1016 && (symtab_hdr->contents
1017 != (unsigned char *) local_syms))
1022 /* Determine the reloc target section. */
1023 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
1024 goto error_ret_free_internal;
1027 || sym_sec->output_section == NULL
1028 || sym_sec->output_section->owner != output_bfd)
1031 /* Ensure no stubs for user supplied overlay manager syms. */
1033 && (strcmp (h->root.root.string, "__ovly_load") == 0
1034 || strcmp (h->root.root.string, "__ovly_return") == 0))
1037 insn_type = non_branch;
1038 if (r_type == R_SPU_REL16
1039 || r_type == R_SPU_ADDR16)
1041 unsigned char insn[4];
/* Peek at the instruction to classify branch vs call vs data.  */
1043 if (!bfd_get_section_contents (ibfd, isec, insn,
1044 irela->r_offset, 4))
1045 goto error_ret_free_internal;
1047 if (is_branch (insn) || is_hint (insn))
/* 0x31/0x33: brasl/brsl — a function call.  */
1050 if ((insn[0] & 0xfd) == 0x31)
1055 /* We are only interested in function symbols. */
1059 sym_name = h->root.root.string;
1063 sym_type = ELF_ST_TYPE (sym->st_info);
1064 sym_name = bfd_elf_sym_name (sym_sec->owner,
1070 if (sym_type != STT_FUNC)
1072 /* It's common for people to write assembly and forget
1073 to give function symbols the right type. Handle
1074 calls to such symbols, but warn so that (hopefully)
1075 people will fix their code. We need the symbol
1076 type to be correct to distinguish function pointer
1077 initialisation from other pointer initialisation. */
1078 if (insn_type == call)
1079 (*_bfd_error_handler) (_("warning: call to non-function"
1080 " symbol %s defined in %B"),
1081 sym_sec->owner, sym_name);
1082 else if (insn_type == non_branch)
1086 if (!needs_ovl_stub (sym_name, sym_sec, isec, htab,
1087 insn_type != non_branch))
/* Lazily allocate one stub counter per overlay + 1 for ovl 0.  */
1090 if (htab->stub_count == NULL)
1093 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1094 htab->stub_count = bfd_zmalloc (amt);
1095 if (htab->stub_count == NULL)
1096 goto error_ret_free_internal;
1101 if (!count_stub (htab, ibfd, isec, insn_type, h, irela))
1102 goto error_ret_free_internal;
/* Emission pass: compute the destination and write the stub.  */
1109 dest = h->root.u.def.value;
1111 dest = sym->st_value;
1112 dest += irela->r_addend;
1113 if (!build_stub (htab, ibfd, isec, insn_type, h, irela,
1115 goto error_ret_free_internal;
1119 /* We're done with the internal relocs, free them. */
1120 if (elf_section_data (isec)->relocs != internal_relocs)
1121 free (internal_relocs);
1124 if (local_syms != NULL
1125 && symtab_hdr->contents != (unsigned char *) local_syms)
1127 if (!info->keep_memory)
1130 symtab_hdr->contents = (unsigned char *) local_syms;
1137 /* Allocate space for overlay call and return stubs. */
/* Sizing entry point, called from the linker before final layout:
   counts all needed stubs (process_stubs with build=FALSE, plus the
   _SPUEAR_ traversal), then creates one .stub section per overlay
   plus one for the non-overlay area, the .ovtab overlay table section,
   and the .toe section.  PLACE_SPU_SECTION is a linker callback that
   positions each new section.  */
1140 spu_elf_size_stubs (bfd *output_bfd,
1141 struct bfd_link_info *info,
1142 void (*place_spu_section) (asection *, asection *,
1144 int non_overlay_stubs)
1146 struct spu_link_hash_table *htab = spu_hash_table (info);
1153 htab->non_overlay_stubs = non_overlay_stubs;
1154 if (!process_stubs (output_bfd, info, FALSE))
1157 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, htab);
/* No stubs needed at all: nothing more to create.  */
1161 if (htab->stub_count == NULL)
1164 ibfd = info->input_bfds;
1165 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1166 htab->stub_sec = bfd_zmalloc (amt);
1167 if (htab->stub_sec == NULL)
1170 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1171 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
/* stub_sec[0] is the non-overlay stub area, placed after .text.  */
1172 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1173 htab->stub_sec[0] = stub;
1175 || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
1177 stub->size = htab->stub_count[0] * OVL_STUB_SIZE;
1178 (*place_spu_section) (stub, NULL, ".text");
/* One stub section per overlay, placed with its overlay section.  */
1180 for (i = 0; i < htab->num_overlays; ++i)
1182 asection *osec = htab->ovl_sec[i];
1183 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1184 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1185 htab->stub_sec[ovl] = stub;
1187 || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
1189 stub->size = htab->stub_count[ovl] * OVL_STUB_SIZE;
1190 (*place_spu_section) (stub, osec, NULL);
1193 /* htab->ovtab consists of two arrays.
1203 . } _ovly_buf_table[];
1206 flags = (SEC_ALLOC | SEC_LOAD
1207 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1208 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1209 if (htab->ovtab == NULL
1210 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
/* 16 bytes per overlay entry + 16-byte header slot + 4 per buffer.  */
1213 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1214 (*place_spu_section) (htab->ovtab, NULL, ".data");
1216 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1217 if (htab->toe == NULL
1218 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1220 htab->toe->size = 16;
1221 (*place_spu_section) (htab->toe, NULL, ".toe");
1226 /* Functions to handle embedded spu_ovl.o object. */
/* iovec open callback for the built-in overlay manager: presumably
   just returns STREAM as the open handle (body not visible in this
   dump).  */
1229 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
/* iovec pread callback: copy up to COUNT bytes starting at OFFSET out
   of the in-memory overlay-manager image described by STREAM (a
   struct _ovl_stream with start/end pointers), clamping to the image
   size.  */
1235 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1241 struct _ovl_stream *os;
1245 os = (struct _ovl_stream *) stream;
1246 max = (const char *) os->end - (const char *) os->start;
1248 if ((ufile_ptr) offset >= max)
1252 if (count > max - offset)
1253 count = max - offset;
1255 memcpy (buf, (const char *) os->start + offset, count);
/* Open the embedded overlay-manager object as a bfd via the iovec
   callbacks above; stores the result in *OVL_BFD and returns nonzero
   on success.  */
1260 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1262 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1269 return *ovl_bfd != NULL;
1272 /* Define an STT_OBJECT symbol. */
/* Define NAME in the .ovtab section as an STT_OBJECT symbol; errors
   out (bad value) if user code already defined it elsewhere.  The
   value/size are filled in by the caller.  */
1274 static struct elf_link_hash_entry *
1275 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1277 struct elf_link_hash_entry *h;
1279 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1283 if (h->root.type != bfd_link_hash_defined
1286 h->root.type = bfd_link_hash_defined;
1287 h->root.u.def.section = htab->ovtab;
1288 h->type = STT_OBJECT;
1291 h->ref_regular_nonweak = 1;
/* Already defined by user code: not allowed for ovtab symbols.  */
1296 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1297 h->root.u.def.section->owner,
1298 h->root.root.string);
1299 bfd_set_error (bfd_error_bad_value);
1306 /* Fill in all stubs and the overlay tables. */
/* Final-link entry point: allocate stub section contents, resolve the
   __ovly_load / __ovly_return overlay-manager symbols (which must live
   outside any overlay), rerun process_stubs in emission mode, verify
   the written sizes match the sizing pass, then fill in the .ovtab
   _ovly_table / _ovly_buf_table arrays and define their boundary
   symbols plus _EAR_ in .toe.  */
1309 spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms)
1311 struct spu_link_hash_table *htab = spu_hash_table (info);
1312 struct elf_link_hash_entry *h;
1318 htab->emit_stub_syms = emit_syms;
1319 if (htab->stub_count == NULL)
/* Allocate contents; rawsize temporarily records the expected size
   while size is reused as the running write offset.  */
1322 for (i = 0; i <= htab->num_overlays; i++)
1323 if (htab->stub_sec[i]->size != 0)
1325 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1326 htab->stub_sec[i]->size);
1327 if (htab->stub_sec[i]->contents == NULL)
1329 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1330 htab->stub_sec[i]->size = 0;
1333 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1334 htab->ovly_load = h;
1335 BFD_ASSERT (h != NULL
1336 && (h->root.type == bfd_link_hash_defined
1337 || h->root.type == bfd_link_hash_defweak)
/* The overlay manager itself must not live in an overlay.  */
1340 s = h->root.u.def.section->output_section;
1341 if (spu_elf_section_data (s)->u.o.ovl_index)
1343 (*_bfd_error_handler) (_("%s in overlay section"),
1344 h->root.u.def.section->owner);
1345 bfd_set_error (bfd_error_bad_value);
1349 h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
1350 htab->ovly_return = h;
1352 /* Write out all the stubs. */
1353 obfd = htab->ovtab->output_section->owner;
1354 process_stubs (obfd, info, TRUE);
1356 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, htab);
/* Cross-check emitted bytes against the sizing pass.  */
1360 for (i = 0; i <= htab->num_overlays; i++)
1362 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1364 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1365 bfd_set_error (bfd_error_bad_value);
1368 htab->stub_sec[i]->rawsize = 0;
1373 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1374 bfd_set_error (bfd_error_bad_value);
1378 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1379 if (htab->ovtab->contents == NULL)
1382 /* Write out _ovly_table. */
1383 p = htab->ovtab->contents;
1384 /* set low bit of .size to mark non-overlay area as present. */
1386 for (s = obfd->sections; s != NULL; s = s->next)
1388 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
1392 unsigned long off = ovl_index * 16;
1393 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
/* Each 16-byte entry: vma, size (rounded to 16), file_off, buf.  */
1395 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1396 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1397 /* file_off written later in spu_elf_modify_program_headers. */
1398 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
1402 h = define_ovtab_symbol (htab, "_ovly_table");
1405 h->root.u.def.value = 16;
1406 h->size = htab->num_overlays * 16;
1408 h = define_ovtab_symbol (htab, "_ovly_table_end");
1411 h->root.u.def.value = htab->num_overlays * 16 + 16;
1414 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1417 h->root.u.def.value = htab->num_overlays * 16 + 16;
1418 h->size = htab->num_buf * 4;
1420 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1423 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1426 h = define_ovtab_symbol (htab, "_EAR_");
1429 h->root.u.def.section = htab->toe;
1430 h->root.u.def.value = 0;
/* NOTE(review): lossy extraction -- the leading numeral on each line is the
   original file's line number; gaps in the numbering (e.g. 1441-1444) are
   lines dropped by extraction (return type, braces, declarations of reg[],
   rt, ra, imm, unrecog, and several returns).  Code left byte-identical.
   Purpose: decode SPU insns from OFFSET in SEC, tracking a small register
   file to discover the prologue's adjustment to r1 (the stack pointer).  */
1436 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1437 Search for stack adjusting insns, and return the sp delta. */
1440 find_function_stack_adjust (asection *sec, bfd_vma offset)
1445 memset (reg, 0, sizeof (reg));
/* Scan at most 32 unrecognized insns; each insn is 4 bytes.  */
1446 for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
1448 unsigned char buf[4];
1452 /* Assume no relocs on stack adjusing insns. */
1453 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
1456 if (buf[0] == 0x24 /* stqd */)
/* rt/ra fields are decoded from the big-endian insn bytes; rt decode
   itself is in a dropped line.  */
1460 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
1461 /* Partly decoded immediate field. */
1462 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
1464 if (buf[0] == 0x1c /* ai */)
/* Sign-extend the 10-bit immediate.  */
1467 imm = (imm ^ 0x200) - 0x200;
1468 reg[rt] = reg[ra] + imm;
1470 if (rt == 1 /* sp */)
1477 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
1479 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
1481 reg[rt] = reg[ra] + reg[rb];
1485 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1487 if (buf[0] >= 0x42 /* ila */)
1488 imm |= (buf[0] & 1) << 17;
1493 if (buf[0] == 0x40 /* il */)
1495 if ((buf[1] & 0x80) == 0)
/* Sign-extend the 16-bit immediate for "il".  */
1497 imm = (imm ^ 0x8000) - 0x8000;
1499 else if ((buf[1] & 0x80) == 0 /* ilhu */)
1505 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
1507 reg[rt] |= imm & 0xffff;
1510 else if (buf[0] == 0x04 /* ori */)
1513 imm = (imm ^ 0x200) - 0x200;
1514 reg[rt] = reg[ra] | imm;
1517 else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
1518 || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
1520 /* Used in pic reg load. Say rt is trashed. */
1524 else if (is_branch (buf) || is_indirect_branch (buf))
1525 /* If we hit a branch then we must be out of the prologue. */
1534 /* qsort predicate to sort symbols by section and value. */
/* File-scope cursors set by the caller (discover_functions) before qsort:
   base of the symbol array and the parallel per-symbol section array, so
   the comparator can map a symbol pointer to its section.  */
1536 static Elf_Internal_Sym *sort_syms_syms;
1537 static asection **sort_syms_psecs;
/* NOTE(review): return type line and braces dropped by extraction.
   Order: section index, then st_value ascending, then larger st_size
   first, then address of the symbol pointer as a stable tiebreak.  */
1540 sort_syms (const void *a, const void *b)
1542 Elf_Internal_Sym *const *s1 = a;
1543 Elf_Internal_Sym *const *s2 = b;
1544 asection *sec1,*sec2;
1545 bfd_signed_vma delta;
1547 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1548 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1551 return sec1->index - sec2->index;
1553 delta = (*s1)->st_value - (*s2)->st_value;
1555 return delta < 0 ? -1 : 1;
/* Note the operands are swapped: bigger symbols sort earlier.  */
1557 delta = (*s2)->st_size - (*s1)->st_size;
1559 return delta < 0 ? -1 : 1;
1561 return *s1 < *s2 ? -1 : 1;
/* NOTE(review): the opening of "struct call_info" was dropped by
   extraction; these two members are a call-graph edge: target function
   plus chain link (an is_tail bit is used elsewhere but its declaration
   was dropped).  */
1566 struct function_info *fun;
1567 struct call_info *next;
/* One contiguous address range belonging to a function, plus its place
   in the call graph.  */
1571 struct function_info
1573 /* List of functions called. Also branches to hot/cold part of
1575 struct call_info *call_list;
1576 /* For hot/cold part of function, point to owner. */
1577 struct function_info *start;
1578 /* Symbol at start of function. */
/* These two are presumably members of an anonymous union "u" -- the
   code elsewhere accesses u.sym and u.h; the union keyword line was
   dropped.  */
1580 Elf_Internal_Sym *sym;
1581 struct elf_link_hash_entry *h;
1583 /* Function section. */
1585 /* Address range of (this part of) function. */
1589 /* Set if global symbol. */
1590 unsigned int global : 1;
1591 /* Set if known to be start of function (as distinct from a hunk
1592 in hot/cold section. */
1593 unsigned int is_func : 1;
1594 /* Flags used during call tree traversal. */
1595 unsigned int visit1 : 1;
1596 unsigned int non_root : 1;
1597 unsigned int visit2 : 1;
1598 unsigned int marking : 1;
1599 unsigned int visit3 : 1;
/* Per-section table of function_info entries, kept sorted by address.  */
1602 struct spu_elf_stack_info
1606 /* Variable size array describing functions, one per contiguous
1607 address range belonging to a function. */
1608 struct function_info fun[1];
1611 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1612 entries for section SEC. */
1614 static struct spu_elf_stack_info *
1615 alloc_stack_info (asection *sec, int max_fun)
1617 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
/* Size is the header plus MAX_FUN-1 extra elements, since the struct
   already contains one fun[1] element.  */
1620 amt = sizeof (struct spu_elf_stack_info);
1621 amt += (max_fun - 1) * sizeof (struct function_info);
/* bfd_zmalloc zeroes, so num_fun and all flags start at 0; returns NULL
   on allocation failure (callers check).  */
1622 sec_data->u.i.stack_info = bfd_zmalloc (amt);
1623 if (sec_data->u.i.stack_info != NULL)
1624 sec_data->u.i.stack_info->max_fun = max_fun;
1625 return sec_data->u.i.stack_info;
1628 /* Add a new struct function_info describing a (part of a) function
1629 starting at SYM_H. Keep the array sorted by address. */
/* NOTE(review): signature lines for the global/is_func parameters and
   most braces were dropped by extraction; SYM_H is a void* interpreted
   as Elf_Internal_Sym* or elf_link_hash_entry* depending on "global".  */
1631 static struct function_info *
1632 maybe_insert_function (asection *sec,
1635 bfd_boolean is_func)
1637 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1638 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
/* Lazily create the per-section table with room for 20 entries.  */
1644 sinfo = alloc_stack_info (sec, 20);
1651 Elf_Internal_Sym *sym = sym_h;
1652 off = sym->st_value;
1653 size = sym->st_size;
1657 struct elf_link_hash_entry *h = sym_h;
1658 off = h->root.u.def.value;
/* Linear search backwards for the insertion point (array is sorted by
   lo address).  */
1662 for (i = sinfo->num_fun; --i >= 0; )
1663 if (sinfo->fun[i].lo <= off)
1668 /* Don't add another entry for an alias, but do update some
1670 if (sinfo->fun[i].lo == off)
1672 /* Prefer globals over local syms. */
1673 if (global && !sinfo->fun[i].global)
1675 sinfo->fun[i].global = TRUE;
1676 sinfo->fun[i].u.h = sym_h;
1679 sinfo->fun[i].is_func = TRUE;
1680 return &sinfo->fun[i];
1682 /* Ignore a zero-size symbol inside an existing function. */
1683 else if (sinfo->fun[i].hi > off && size == 0)
1684 return &sinfo->fun[i];
/* Shift later entries up to make room at index i+1.  */
1687 if (++i < sinfo->num_fun)
1688 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1689 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1690 else if (i >= sinfo->max_fun)
1692 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1693 bfd_size_type old = amt;
1695 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
/* Grow geometrically: +20 plus half again.  */
1696 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1697 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1698 sinfo = bfd_realloc (sinfo, amt);
/* Zero the newly added tail; realloc does not.  */
1701 memset ((char *) sinfo + old, 0, amt - old);
1702 sec_data->u.i.stack_info = sinfo;
1704 sinfo->fun[i].is_func = is_func;
1705 sinfo->fun[i].global = global;
1706 sinfo->fun[i].sec = sec;
1708 sinfo->fun[i].u.h = sym_h;
1710 sinfo->fun[i].u.sym = sym_h;
1711 sinfo->fun[i].lo = off;
1712 sinfo->fun[i].hi = off + size;
/* Stack grows down, so the prologue delta is negated to give usage.  */
1713 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1714 sinfo->num_fun += 1;
1715 return &sinfo->fun[i];
1718 /* Return the name of FUN. */
/* NOTE(review): return type, locals (sec, ibfd) and several lines were
   dropped by extraction.  For a local symbol with no name the function
   fabricates "section+offset"; the bfd_malloc result is apparently
   leaked by design (names live for the duration of the link).  */
1721 func_name (struct function_info *fun)
1725 Elf_Internal_Shdr *symtab_hdr;
/* Walk to the function's owning (start) part before naming it.  */
1727 while (fun->start != NULL)
1731 return fun->u.h->root.root.string;
1734 if (fun->u.sym->st_name == 0)
1736 size_t len = strlen (sec->name);
1737 char *name = bfd_malloc (len + 10);
1740 sprintf (name, "%s+%lx", sec->name,
1741 (unsigned long) fun->u.sym->st_value & 0xffffffff);
1745 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1746 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1749 /* Read the instruction at OFF in SEC. Return true iff the instruction
1750 is a nop, lnop, or stop 0 (all zero insn). */
1753 is_nop (asection *sec, bfd_vma off)
1755 unsigned char insn[4];
/* Out-of-range or unreadable counts as "not a nop" (return on dropped
   line).  */
1757 if (off + 4 > sec->size
1758 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
/* Matches the nop/lnop opcode byte patterns.  */
1760 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1762 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1767 /* Extend the range of FUN to cover nop padding up to LIMIT.
1768 Return TRUE iff some instruction other than a NOP was found. */
1771 insns_at_end (struct function_info *fun, bfd_vma limit)
/* Round the current end up to a 4-byte insn boundary, then swallow
   nops.  (Update of fun->hi and the return are on dropped lines.)  */
1773 bfd_vma off = (fun->hi + 3) & -4;
1775 while (off < limit && is_nop (fun->sec, off))
1786 /* Check and fix overlapping function ranges. Return TRUE iff there
1787 are gaps in the current info we have about functions in SEC. */
1790 check_function_ranges (asection *sec, struct bfd_link_info *info)
1792 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1793 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1795 bfd_boolean gaps = FALSE;
/* Adjacent entries that overlap get clipped with a warning; a gap
   between entries (non-nop insns after one function's end) marks the
   section as having unmapped code.  */
1800 for (i = 1; i < sinfo->num_fun; i++)
1801 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1803 /* Fix overlapping symbols. */
1804 const char *f1 = func_name (&sinfo->fun[i - 1]);
1805 const char *f2 = func_name (&sinfo->fun[i]);
1807 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
1808 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
1810 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
1813 if (sinfo->num_fun == 0)
1817 if (sinfo->fun[0].lo != 0)
1819 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
1821 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
1823 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
1824 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
1826 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
1832 /* Search current function info for a function that contains address
1833 OFFSET in section SEC. */
1835 static struct function_info *
1836 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1838 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1839 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
/* Binary search over the sorted fun[] array (lo init and loop header
   are on dropped lines).  */
1843 hi = sinfo->num_fun;
1846 mid = (lo + hi) / 2;
1847 if (offset < sinfo->fun[mid].lo)
1849 else if (offset >= sinfo->fun[mid].hi)
1852 return &sinfo->fun[mid];
/* Not found: report and (on a dropped line) fail.  */
1854 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1859 /* Add CALLEE to CALLER call list if not already present. */
1862 insert_callee (struct function_info *caller, struct call_info *callee)
1864 struct call_info *p;
/* Duplicate edge: keep the stronger (non-tail) flavour and (on a
   dropped line) tell the caller not to link CALLEE in.  */
1865 for (p = caller->call_list; p != NULL; p = p->next)
1866 if (p->fun == callee->fun)
1868 /* Tail calls use less stack than normal calls. Retain entry
1869 for normal call over one for tail call. */
1870 if (p->is_tail > callee->is_tail)
1871 p->is_tail = callee->is_tail;
/* New edge: push onto the head of the caller's list.  */
1874 callee->next = caller->call_list;
1875 caller->call_list = callee;
1879 /* Rummage through the relocs for SEC, looking for function calls.
1880 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1881 mark destination symbols on calls as being functions. Also
1882 look at branches, which may be tail calls or go to hot/cold
1883 section part of same function. */
/* NOTE(review): call_tree parameter line, many braces and error-path
   returns were dropped by extraction.  */
1886 mark_functions_via_relocs (asection *sec,
1887 struct bfd_link_info *info,
1890 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1891 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1892 Elf_Internal_Sym *syms;
/* "warned" is static so the non-code-section diagnostic below is only
   emitted once across all sections.  */
1894 static bfd_boolean warned;
1896 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
1898 if (internal_relocs == NULL)
1901 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1902 psyms = &symtab_hdr->contents;
1903 syms = *(Elf_Internal_Sym **) psyms;
1904 irela = internal_relocs;
1905 irelaend = irela + sec->reloc_count;
1906 for (; irela < irelaend; irela++)
1908 enum elf_spu_reloc_type r_type;
1909 unsigned int r_indx;
1911 Elf_Internal_Sym *sym;
1912 struct elf_link_hash_entry *h;
1914 unsigned char insn[4];
1915 bfd_boolean is_call;
1916 struct function_info *caller;
1917 struct call_info *callee;
/* Only 16-bit branch-capable relocs can be calls/branches.  */
1919 r_type = ELF32_R_TYPE (irela->r_info);
1920 if (r_type != R_SPU_REL16
1921 && r_type != R_SPU_ADDR16)
1924 r_indx = ELF32_R_SYM (irela->r_info);
1925 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
1929 || sym_sec->output_section == NULL
1930 || sym_sec->output_section->owner != sec->output_section->owner)
1933 if (!bfd_get_section_contents (sec->owner, sec, insn,
1934 irela->r_offset, 4))
1936 if (!is_branch (insn))
1939 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1940 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1944 if (!call_tree || !warned)
1945 info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
1946 " %B(%A), stack analysis incomplete\n"),
1947 sec->owner, sec, irela->r_offset,
1948 sym_sec->owner, sym_sec);
/* brsl/brasl opcodes; anything else branch-like is a plain branch.  */
1952 is_call = (insn[0] & 0xfd) == 0x31;
1955 val = h->root.u.def.value;
1957 val = sym->st_value;
1958 val += irela->r_addend;
/* First pass (call_tree false): record the destination as a function,
   fabricating a symbol when the branch lands mid-symbol (addend != 0).  */
1962 struct function_info *fun;
1964 if (irela->r_addend != 0)
1966 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
1969 fake->st_value = val;
1971 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
1975 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
1977 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
1980 if (irela->r_addend != 0
1981 && fun->u.sym != sym)
/* Second pass (call_tree true): add an edge caller -> callee.  */
1986 caller = find_function (sec, irela->r_offset, info);
1989 callee = bfd_malloc (sizeof *callee);
1993 callee->fun = find_function (sym_sec, val, info);
1994 if (callee->fun == NULL)
1996 callee->is_tail = !is_call;
1997 if (!insert_callee (caller, callee))
2000 && !callee->fun->is_func
2001 && callee->fun->stack == 0)
2003 /* This is either a tail call or a branch from one part of
2004 the function to another, ie. hot/cold section. If the
2005 destination has been called by some other function then
2006 it is a separate function. We also assume that functions
2007 are not split across input files. */
2008 if (sec->owner != sym_sec->owner)
2010 callee->fun->start = NULL;
2011 callee->fun->is_func = TRUE;
2013 else if (callee->fun->start == NULL)
2014 callee->fun->start = caller;
2017 struct function_info *callee_start;
2018 struct function_info *caller_start;
/* If caller and callee resolve to different root parts, the callee
   must really be a separate function after all.  */
2019 callee_start = callee->fun;
2020 while (callee_start->start)
2021 callee_start = callee_start->start;
2022 caller_start = caller;
2023 while (caller_start->start)
2024 caller_start = caller_start->start;
2025 if (caller_start != callee_start)
2027 callee->fun->start = NULL;
2028 callee->fun->is_func = TRUE;
2037 /* Handle something like .init or .fini, which has a piece of a function.
2038 These sections are pasted together to form a single function. */
2041 pasted_function (asection *sec, struct bfd_link_info *info)
2043 struct bfd_link_order *l;
2044 struct _spu_elf_section_data *sec_data;
2045 struct spu_elf_stack_info *sinfo;
2046 Elf_Internal_Sym *fake;
2047 struct function_info *fun, *fun_start;
/* Fabricate a local symbol spanning the whole section.  */
2049 fake = bfd_zmalloc (sizeof (*fake));
2053 fake->st_size = sec->size;
2055 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2056 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2060 /* Find a function immediately preceding this section. */
/* Walk the output section's link order; fun_start tracks the last
   function seen before SEC so this piece can chain to it via start.  */
2062 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2064 if (l->u.indirect.section == sec)
2066 if (fun_start != NULL)
2067 fun->start = fun_start;
2070 if (l->type == bfd_indirect_link_order
2071 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2072 && (sinfo = sec_data->u.i.stack_info) != NULL
2073 && sinfo->num_fun != 0)
2074 fun_start = &sinfo->fun[sinfo->num_fun - 1];
/* Fell off the list without finding SEC -- internal inconsistency.  */
2077 info->callbacks->einfo (_("%A link_order not found\n"), sec);
2081 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2082 overlay stub sections. */
2085 interesting_section (asection *s, bfd *obfd)
/* Loaded, allocated code destined for OBFD, minus SEC_IN_MEMORY stubs;
   a size test is presumably on the dropped trailing line.  */
2087 return (s->output_section != NULL
2088 && s->output_section->owner == obfd
2089 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2090 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2094 /* Map address ranges in code sections to functions. */
/* NOTE(review): heavy extraction loss in this function -- bfd_idx
   counting loop bodies, many braces, error returns and the final
   "return TRUE" are missing.  Overall shape: per-BFD symbol collection
   and sorting, function-symbol installation, then (if gaps remain)
   reloc-driven discovery and global-symbol installation.  */
2097 discover_functions (bfd *output_bfd, struct bfd_link_info *info)
2101 Elf_Internal_Sym ***psym_arr;
2102 asection ***sec_arr;
2103 bfd_boolean gaps = FALSE;
/* First loop just counts input bfds to size the two arrays.  */
2106 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2109 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2110 if (psym_arr == NULL)
2112 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2113 if (sec_arr == NULL)
2117 for (ibfd = info->input_bfds, bfd_idx = 0;
2119 ibfd = ibfd->link_next, bfd_idx++)
2121 extern const bfd_target bfd_elf32_spu_vec;
2122 Elf_Internal_Shdr *symtab_hdr;
2125 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2126 asection **psecs, **p;
/* Skip non-SPU input files.  */
2128 if (ibfd->xvec != &bfd_elf32_spu_vec)
2131 /* Read all the symbols. */
2132 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2133 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2137 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2140 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
/* Cache the freshly read symbols for later passes.  */
2142 symtab_hdr->contents = (void *) syms;
2147 /* Select defined function symbols that are going to be output. */
2148 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2151 psym_arr[bfd_idx] = psyms;
2152 psecs = bfd_malloc (symcount * sizeof (*psecs));
2155 sec_arr[bfd_idx] = psecs;
2156 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2157 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2158 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2162 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2163 if (s != NULL && interesting_section (s, output_bfd))
2166 symcount = psy - psyms;
2169 /* Sort them by section and offset within section. */
2170 sort_syms_syms = syms;
2171 sort_syms_psecs = psecs;
2172 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2174 /* Now inspect the function symbols. */
2175 for (psy = psyms; psy < psyms + symcount; )
2177 asection *s = psecs[*psy - syms];
2178 Elf_Internal_Sym **psy2;
/* Find the run of symbols in the same section, and size the
   section's stack_info table to that count.  */
2180 for (psy2 = psy; ++psy2 < psyms + symcount; )
2181 if (psecs[*psy2 - syms] != s)
2184 if (!alloc_stack_info (s, psy2 - psy))
2189 /* First install info about properly typed and sized functions.
2190 In an ideal world this will cover all code sections, except
2191 when partitioning functions into hot and cold sections,
2192 and the horrible pasted together .init and .fini functions. */
2193 for (psy = psyms; psy < psyms + symcount; ++psy)
2196 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2198 asection *s = psecs[sy - syms];
2199 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2204 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2205 if (interesting_section (sec, output_bfd))
2206 gaps |= check_function_ranges (sec, info);
2211 /* See if we can discover more function symbols by looking at
2213 for (ibfd = info->input_bfds, bfd_idx = 0;
2215 ibfd = ibfd->link_next, bfd_idx++)
2219 if (psym_arr[bfd_idx] == NULL)
2222 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2223 if (interesting_section (sec, output_bfd)
2224 && sec->reloc_count != 0)
2226 if (!mark_functions_via_relocs (sec, info, FALSE))
2231 for (ibfd = info->input_bfds, bfd_idx = 0;
2233 ibfd = ibfd->link_next, bfd_idx++)
2235 Elf_Internal_Shdr *symtab_hdr;
2237 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2240 if ((psyms = psym_arr[bfd_idx]) == NULL)
2243 psecs = sec_arr[bfd_idx];
2245 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2246 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2249 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2250 if (interesting_section (sec, output_bfd))
2251 gaps |= check_function_ranges (sec, info);
2255 /* Finally, install all globals. */
2256 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2260 s = psecs[sy - syms];
2262 /* Global syms might be improperly typed functions. */
2263 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2264 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2266 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2271 /* Some of the symbols we've installed as marking the
2272 beginning of functions may have a size of zero. Extend
2273 the range of such functions to the beginning of the
2274 next symbol of interest. */
2275 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2276 if (interesting_section (sec, output_bfd))
2278 struct _spu_elf_section_data *sec_data;
2279 struct spu_elf_stack_info *sinfo;
2281 sec_data = spu_elf_section_data (sec);
2282 sinfo = sec_data->u.i.stack_info;
/* Walk entries last-to-first, extending each to the start of the
   following one (hi starts at section end).  */
2286 bfd_vma hi = sec->size;
2288 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2290 sinfo->fun[fun_idx].hi = hi;
2291 hi = sinfo->fun[fun_idx].lo;
2294 /* No symbols in this section. Must be .init or .fini
2295 or something similar. */
2296 else if (!pasted_function (sec, info))
/* Release the per-bfd scratch arrays.  */
2302 for (ibfd = info->input_bfds, bfd_idx = 0;
2304 ibfd = ibfd->link_next, bfd_idx++)
2306 if (psym_arr[bfd_idx] == NULL)
2309 free (psym_arr[bfd_idx]);
2310 free (sec_arr[bfd_idx]);
2319 /* Mark nodes in the call graph that are called by some other node. */
/* Depth-first walk; visit1 (set on a dropped line) prevents revisits.  */
2322 mark_non_root (struct function_info *fun)
2324 struct call_info *call;
2327 for (call = fun->call_list; call; call = call->next)
2329 call->fun->non_root = TRUE;
2330 if (!call->fun->visit1)
2331 mark_non_root (call->fun);
2335 /* Remove cycles from the call graph. */
/* DFS with a "marking" flag on the current path: an edge back into the
   path is a cycle and is unlinked (with a user-visible note).  visit2
   is set on a dropped line to avoid re-traversal.  */
2338 call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
2340 struct call_info **callp, *call;
2343 fun->marking = TRUE;
2345 callp = &fun->call_list;
2346 while ((call = *callp) != NULL)
2348 if (!call->fun->visit2)
2349 call_graph_traverse (call->fun, info);
2350 else if (call->fun->marking)
2352 const char *f1 = func_name (fun);
2353 const char *f2 = func_name (call->fun);
2355 info->callbacks->info (_("Stack analysis will ignore the call "
/* Drop the back-edge from the caller's list.  */
2358 *callp = call->next;
2361 callp = &call->next;
2363 fun->marking = FALSE;
2366 /* Populate call_list for each function. */
/* Three sweeps over all SPU input sections: (1) add edges via relocs,
   (2) hoist call info from hot/cold parts onto the owning function and
   mark roots, (3) break cycles starting from the roots.  */
2369 build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
2373 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2375 extern const bfd_target bfd_elf32_spu_vec;
2378 if (ibfd->xvec != &bfd_elf32_spu_vec)
2381 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2383 if (!interesting_section (sec, output_bfd)
2384 || sec->reloc_count == 0)
2387 if (!mark_functions_via_relocs (sec, info, TRUE))
2391 /* Transfer call info from hot/cold section part of function
2393 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2395 struct _spu_elf_section_data *sec_data;
2396 struct spu_elf_stack_info *sinfo;
2398 if ((sec_data = spu_elf_section_data (sec)) != NULL
2399 && (sinfo = sec_data->u.i.stack_info) != NULL)
2402 for (i = 0; i < sinfo->num_fun; ++i)
2404 struct function_info *start = sinfo->fun[i].start;
2408 struct call_info *call;
/* Resolve to the root (owning) part of the function.  */
2410 while (start->start != NULL)
2411 start = start->start;
2412 call = sinfo->fun[i].call_list;
2413 while (call != NULL)
2415 struct call_info *call_next = call->next;
2416 if (!insert_callee (start, call))
/* The part's own list is emptied; it is never a root.  */
2420 sinfo->fun[i].call_list = NULL;
2421 sinfo->fun[i].non_root = TRUE;
2428 /* Find the call graph root(s). */
2429 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2431 extern const bfd_target bfd_elf32_spu_vec;
2434 if (ibfd->xvec != &bfd_elf32_spu_vec)
2437 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2439 struct _spu_elf_section_data *sec_data;
2440 struct spu_elf_stack_info *sinfo;
2442 if ((sec_data = spu_elf_section_data (sec)) != NULL
2443 && (sinfo = sec_data->u.i.stack_info) != NULL)
2446 for (i = 0; i < sinfo->num_fun; ++i)
2447 if (!sinfo->fun[i].visit1)
2448 mark_non_root (&sinfo->fun[i])
2453 /* Remove cycles from the call graph. We start from the root node(s)
2454 so that we break cycles in a reasonable place. */
2455 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2457 extern const bfd_target bfd_elf32_spu_vec;
2460 if (ibfd->xvec != &bfd_elf32_spu_vec)
2463 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2465 struct _spu_elf_section_data *sec_data;
2466 struct spu_elf_stack_info *sinfo;
2468 if ((sec_data = spu_elf_section_data (sec)) != NULL
2469 && (sinfo = sec_data->u.i.stack_info) != NULL)
2472 for (i = 0; i < sinfo->num_fun; ++i)
2473 if (!sinfo->fun[i].non_root)
2474 call_graph_traverse (&sinfo->fun[i], info);
2482 /* Descend the call graph for FUN, accumulating total stack required. */
/* NOTE(review): return type, visit3 guard, and stack/f1 declarations
   are on dropped lines.  Returns the cumulative stack for FUN and, when
   emit_stack_syms is set, defines an absolute __stack_* symbol holding
   that value.  */
2485 sum_stack (struct function_info *fun,
2486 struct bfd_link_info *info,
2487 int emit_stack_syms)
2489 struct call_info *call;
2490 struct function_info *max = NULL;
2491 bfd_vma max_stack = fun->stack;
2498 for (call = fun->call_list; call; call = call->next)
2500 stack = sum_stack (call->fun, info, emit_stack_syms);
2501 /* Include caller stack for normal calls, don't do so for
2502 tail calls. fun->stack here is local stack usage for
2505 stack += fun->stack;
/* Track the deepest callee path; "max" remembers which callee.  */
2506 if (max_stack < stack)
2513 f1 = func_name (fun);
2514 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
2515 f1, (bfd_vma) fun->stack, max_stack);
2519 info->callbacks->minfo (_(" calls:\n"));
2520 for (call = fun->call_list; call; call = call->next)
2522 const char *f2 = func_name (call->fun);
2523 const char *ann1 = call->fun == max ? "*" : " ";
2524 const char *ann2 = call->is_tail ? "t" : " ";
2526 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
2530 /* Now fun->stack holds cumulative stack. */
2531 fun->stack = max_stack;
2534 if (emit_stack_syms)
2536 struct spu_link_hash_table *htab = spu_hash_table (info);
/* "__stack_" + name + optional "_%x" section id fits in 18 extra.  */
2537 char *name = bfd_malloc (18 + strlen (f1));
2538 struct elf_link_hash_entry *h;
2542 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
2543 sprintf (name, "__stack_%s", f1);
2545 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
2547 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
2550 && (h->root.type == bfd_link_hash_new
2551 || h->root.type == bfd_link_hash_undefined
2552 || h->root.type == bfd_link_hash_undefweak))
/* Define it as an absolute symbol whose value is the stack size.  */
2554 h->root.type = bfd_link_hash_defined;
2555 h->root.u.def.section = bfd_abs_section_ptr;
2556 h->root.u.def.value = max_stack;
2561 h->ref_regular_nonweak = 1;
2562 h->forced_local = 1;
2571 /* Provide an estimate of total stack required. */
2574 spu_elf_stack_analysis (bfd *output_bfd,
2575 struct bfd_link_info *info,
2576 int emit_stack_syms)
2579 bfd_vma max_stack = 0;
/* Build the function map and the call graph, then sum stack from each
   root and report the overall maximum.  */
2581 if (!discover_functions (output_bfd, info))
2584 if (!build_call_tree (output_bfd, info))
2587 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
2588 info->callbacks->minfo (_("\nStack size for functions. "
2589 "Annotations: '*' max stack, 't' tail call\n"));
2590 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2592 extern const bfd_target bfd_elf32_spu_vec;
2595 if (ibfd->xvec != &bfd_elf32_spu_vec)
2598 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2600 struct _spu_elf_section_data *sec_data;
2601 struct spu_elf_stack_info *sinfo;
2603 if ((sec_data = spu_elf_section_data (sec)) != NULL
2604 && (sinfo = sec_data->u.i.stack_info) != NULL)
2607 for (i = 0; i < sinfo->num_fun; ++i)
/* Only root functions (not called by anyone) are reported here.  */
2609 if (!sinfo->fun[i].non_root)
2614 stack = sum_stack (&sinfo->fun[i], info,
2616 f1 = func_name (&sinfo->fun[i]);
2617 info->callbacks->info (_(" %s: 0x%v\n"),
2619 if (max_stack < stack)
2627 info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
2631 /* Perform a final link. */
2634 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2636 struct spu_link_hash_table *htab = spu_hash_table (info);
/* Stack analysis is optional and non-fatal: an error is reported but
   the final link still proceeds.  */
2638 if (htab->stack_analysis
2639 && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms)
2640 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2642 return bfd_elf_final_link (output_bfd, info);
2645 /* Called when not normally emitting relocs, ie. !info->relocatable
2646 and !info->emitrelocations. Returns a count of special relocs
2647 that need to be emitted. */
2650 spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
2652 unsigned int count = 0;
2653 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
/* Only PPU-side relocs survive into the output; count them (increment
   and return are on dropped lines).  */
2655 for (; relocs < relend; relocs++)
2657 int r_type = ELF32_R_TYPE (relocs->r_info);
2658 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2665 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
/* NOTE(review): input_bfd/contents parameter lines, the return value
   plumbing and many braces were dropped by extraction.  */
2668 spu_elf_relocate_section (bfd *output_bfd,
2669 struct bfd_link_info *info,
2671 asection *input_section,
2673 Elf_Internal_Rela *relocs,
2674 Elf_Internal_Sym *local_syms,
2675 asection **local_sections)
2677 Elf_Internal_Shdr *symtab_hdr;
2678 struct elf_link_hash_entry **sym_hashes;
2679 Elf_Internal_Rela *rel, *relend;
2680 struct spu_link_hash_table *htab;
2682 bfd_boolean emit_these_relocs = FALSE;
2684 htab = spu_hash_table (info);
2685 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2686 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
2689 relend = relocs + input_section->reloc_count;
2690 for (; rel < relend; rel++)
2693 reloc_howto_type *howto;
2694 unsigned long r_symndx;
2695 Elf_Internal_Sym *sym;
2697 struct elf_link_hash_entry *h;
2698 const char *sym_name;
2701 bfd_reloc_status_type r;
2702 bfd_boolean unresolved_reloc;
2705 r_symndx = ELF32_R_SYM (rel->r_info);
2706 r_type = ELF32_R_TYPE (rel->r_info);
/* PPU relocs are left for the PPU linker; remember they exist and
   skip applying them here.  */
2707 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2709 emit_these_relocs = TRUE;
2713 howto = elf_howto_table + r_type;
2714 unresolved_reloc = FALSE;
/* Resolve the symbol: local via local_syms, global via the standard
   RELOC_FOR_GLOBAL_SYMBOL machinery.  */
2719 if (r_symndx < symtab_hdr->sh_info)
2721 sym = local_syms + r_symndx;
2722 sec = local_sections[r_symndx];
2723 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
2724 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2728 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2729 r_symndx, symtab_hdr, sym_hashes,
2731 unresolved_reloc, warned);
2732 sym_name = h->root.root.string;
2735 if (sec != NULL && elf_discarded_section (sec))
2737 /* For relocs against symbols from removed linkonce sections,
2738 or sections discarded by a linker script, we just want the
2739 section contents zeroed. Avoid any special processing. */
2740 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
2746 if (info->relocatable)
2749 if (unresolved_reloc)
2751 (*_bfd_error_handler)
2752 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
2754 bfd_get_section_name (input_bfd, input_section),
2755 (long) rel->r_offset,
2761 /* If this symbol is in an overlay area, we may need to relocate
2762 to the overlay stub. */
2763 addend = rel->r_addend;
2764 if (htab->stub_sec != NULL
2766 && sec->output_section != NULL
2767 && sec->output_section->owner == output_bfd
2769 || (h != htab->ovly_load && h != htab->ovly_return)))
2772 unsigned int sym_type;
/* Only branch-capable 16-bit relocs can carry a branch/hint insn.  */
2775 if (r_type == R_SPU_REL16
2776 || r_type == R_SPU_ADDR16)
2777 branch = (is_branch (contents + rel->r_offset)
2778 || is_hint (contents + rel->r_offset));
2783 sym_type = ELF_ST_TYPE (sym->st_info);
2785 if ((sym_type == STT_FUNC || branch)
2786 && needs_ovl_stub (sym_name, sec, input_section, htab, branch))
2788 unsigned int ovl = 0;
2789 struct got_entry *g, **head;
/* Calls from within an overlay use that overlay's own stub.  */
2792 ovl = (spu_elf_section_data (input_section->output_section)
2796 head = &h->got.glist;
2798 head = elf_local_got_ents (input_bfd) + r_symndx;
/* Find the matching stub (same addend, compatible overlay) and
   redirect the relocation to it.  */
2800 for (g = *head; g != NULL; g = g->next)
2801 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
2806 relocation = g->stub_addr;
2811 r = _bfd_final_link_relocate (howto,
2815 rel->r_offset, relocation, addend);
2817 if (r != bfd_reloc_ok)
2819 const char *msg = (const char *) 0;
2823 case bfd_reloc_overflow:
2824 if (!((*info->callbacks->reloc_overflow)
2825 (info, (h ? &h->root : NULL), sym_name, howto->name,
2826 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
2830 case bfd_reloc_undefined:
2831 if (!((*info->callbacks->undefined_symbol)
2832 (info, sym_name, input_bfd, input_section,
2833 rel->r_offset, TRUE)))
2837 case bfd_reloc_outofrange:
2838 msg = _("internal error: out of range error");
2841 case bfd_reloc_notsupported:
2842 msg = _("internal error: unsupported relocation error");
2845 case bfd_reloc_dangerous:
2846 msg = _("internal error: dangerous error");
2850 msg = _("internal error: unknown error");
2855 if (!((*info->callbacks->warning)
2856 (info, msg, sym_name, input_bfd, input_section,
/* When not emitting relocs generally, squeeze the reloc array down to
   just the PPU relocs so only those reach the output.  */
2865 && emit_these_relocs
2866 && !info->relocatable
2867 && !info->emitrelocations)
2869 Elf_Internal_Rela *wrel;
2870 Elf_Internal_Shdr *rel_hdr;
2872 wrel = rel = relocs;
2873 relend = relocs + input_section->reloc_count;
2874 for (; rel < relend; rel++)
2878 r_type = ELF32_R_TYPE (rel->r_info);
2879 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2882 input_section->reloc_count = wrel - relocs;
2883 /* Backflips for _bfd_elf_link_output_relocs. */
2884 rel_hdr = &elf_section_data (input_section)->rel_hdr;
2885 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
2892 /* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
/* ELF backend link_output_symbol_hook.  For a final (non-relocatable)
   link that built overlay stubs, any defined symbol whose name starts
   with "_SPUEAR_" is redirected so that its output st_shndx/st_value
   name the overlay call stub in htab->stub_sec[0] instead of the real
   definition.  NOTE(review): this listing is elided (original line
   numbers jump); the return-type line, braces and the loop's
   break/return are not shown here.  */
2895 spu_elf_output_symbol_hook (struct bfd_link_info *info,
2896 const char *sym_name ATTRIBUTE_UNUSED,
2897 Elf_Internal_Sym *sym,
2898 asection *sym_sec ATTRIBUTE_UNUSED,
2899 struct elf_link_hash_entry *h)
2901 struct spu_link_hash_table *htab = spu_hash_table (info);
2903 if (!info->relocatable
2904 && htab->stub_sec != NULL
2906 && (h->root.type == bfd_link_hash_defined
2907 || h->root.type == bfd_link_hash_defweak)
2909 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
2911 struct got_entry *g;
/* Find the plain (addend 0, non-overlay ovl == 0) stub entry on the
   symbol's got.glist and point the output symbol at it.  */
2913 for (g = h->got.glist; g != NULL; g = g->next)
2914 if (g->addend == 0 && g->ovl == 0)
2916 sym->st_shndx = (_bfd_elf_section_from_bfd_section
2917 (htab->stub_sec[0]->output_section->owner,
2918 htab->stub_sec[0]->output_section));
2919 sym->st_value = g->stub_addr;
/* Non-zero when the output should be treated as an SPU "plugin";
   presumably consulted when choosing the ELF header e_type — TODO
   confirm against the elided guard in spu_elf_post_process_headers.  */
2927 static int spu_plugin = 0;
/* Setter for the plugin flag, called from the linker's SPU emulation.
   NOTE(review): the return-type line and function body are elided
   from this listing.  */
2930 spu_elf_plugin (int val)
2935 /* Set ELF header e_type for plugins.  */
/* elf_backend_post_process_headers: marks the output ET_DYN (shared
   object) instead of the default executable type.  NOTE(review): the
   conditional guard — presumably testing spu_plugin — and the
   function's return-type line are elided from this listing.  */
2938 spu_elf_post_process_headers (bfd *abfd,
2939 struct bfd_link_info *info ATTRIBUTE_UNUSED)
2943 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
2945 i_ehdrp->e_type = ET_DYN;
2949 /* We may add an extra PT_LOAD segment for .toe. We also need extra
2950 segments for overlays. */
/* elf_backend_additional_program_headers: reserve one extra program
   header per overlay (htab->num_overlays), plus one more when a
   loadable .toe section exists.  NOTE(review): the declaration of
   `sec`, the increment of `extra`, and the return statement are
   elided from this listing.  */
2953 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
2955 struct spu_link_hash_table *htab = spu_hash_table (info);
2956 int extra = htab->num_overlays;
2962 sec = bfd_get_section_by_name (abfd, ".toe");
2963 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
2969 /* Remove .toe section from other PT_LOAD segments and put it in
2970 a segment of its own. Put overlays in separate segments too. */
/* elf_backend_modify_segment_map: walk every multi-section PT_LOAD
   map entry looking for .toe or an overlay section (ovl_index != 0).
   When one is found, the sections following it are split off into a
   fresh PT_LOAD map entry, and the section itself also gets a
   PT_LOAD of its own.  NOTE(review): this listing is elided —
   bfd_zalloc failure checks and the re-linking of the new map
   entries into the segment_map chain are not shown.  */
2973 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
2976 struct elf_segment_map *m;
2982 toe = bfd_get_section_by_name (abfd, ".toe");
2983 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2984 if (m->p_type == PT_LOAD && m->count > 1)
2985 for (i = 0; i < m->count; i++)
2986 if ((s = m->sections[i]) == toe
2987 || spu_elf_section_data (s)->u.o.ovl_index != 0)
2989 struct elf_segment_map *m2;
/* Sections after the split point move to a new PT_LOAD entry; the
   allocation size accounts for the one sections[] slot already in
   struct elf_segment_map.  */
2992 if (i + 1 < m->count)
2994 amt = sizeof (struct elf_segment_map);
2995 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
2996 m2 = bfd_zalloc (abfd, amt);
2999 m2->count = m->count - (i + 1);
3000 memcpy (m2->sections, m->sections + i + 1,
3001 m2->count * sizeof (m->sections[0]));
3002 m2->p_type = PT_LOAD;
/* The .toe/overlay section itself becomes a single-section PT_LOAD.  */
3010 amt = sizeof (struct elf_segment_map);
3011 m2 = bfd_zalloc (abfd, amt);
3014 m2->p_type = PT_LOAD;
3016 m2->sections[0] = s;
3026 /* Check that all loadable section VMAs lie in the range
3027 LO .. HI inclusive. */
/* Scans every section of every PT_LOAD segment-map entry and returns
   the first non-empty section whose address range [vma, vma+size-1]
   falls outside [lo, hi].  NOTE(review): the return-type line, the
   declaration of `i`, and the final "no offender" return (presumably
   NULL) are elided from this listing.  */
3030 spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
3032 struct elf_segment_map *m;
3035 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
3036 if (m->p_type == PT_LOAD)
3037 for (i = 0; i < m->count; i++)
/* Three failure cases: start below lo, start above hi, or the
   inclusive end (vma + size - 1) beyond hi.  */
3038 if (m->sections[i]->size != 0
3039 && (m->sections[i]->vma < lo
3040 || m->sections[i]->vma > hi
3041 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
3042 return m->sections[i];
3047 /* Tweak the section type of .note.spu_name. */
/* elf_backend_fake_sections: force the SPU name note section's header
   type to SHT_NOTE so it is emitted as a proper ELF note.
   NOTE(review): the return-type line, the `asection *sec` parameter
   line and the function's return are elided from this listing.  */
3050 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
3051 Elf_Internal_Shdr *hdr,
3054 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
3055 hdr->sh_type = SHT_NOTE;
3059 /* Tweak phdrs before writing them out. */
/* elf_backend_modify_program_headers.  Two jobs:
   1) Mark each overlay segment's phdr with PF_OVERLAY and write its
      file offset into the in-memory _ovly_table contents.
   2) Round PT_LOAD p_filesz/p_memsz up to a multiple of 16 (DMA
      alignment), but only after a checking pass proves the padding
      cannot make adjacent segments overlap.
   NOTE(review): this listing is elided — several declarations, the
   early return when phdr is unavailable, loop/brace lines and the
   `last` bookkeeping are not shown.  */
3062 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
3064 const struct elf_backend_data *bed;
3065 struct elf_obj_tdata *tdata;
3066 Elf_Internal_Phdr *phdr, *last;
3067 struct spu_link_hash_table *htab;
3074 bed = get_elf_backend_data (abfd);
3075 tdata = elf_tdata (abfd);
3077 count = tdata->program_header_size / bed->s->sizeof_phdr;
3078 htab = spu_hash_table (info);
3079 if (htab->num_overlays != 0)
3081 struct elf_segment_map *m;
/* Segment maps and phdrs are walked in step (index i); an overlay
   segment is one whose first section has a non-zero ovl_index.  */
3084 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
3086 && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
3088 /* Mark this as an overlay header. */
3089 phdr[i].p_flags |= PF_OVERLAY;
3091 if (htab->ovtab != NULL && htab->ovtab->size != 0)
3093 bfd_byte *p = htab->ovtab->contents;
/* Each _ovly_table entry is 16 bytes; the file_off field lives at
   byte offset 8 within entry `o`.  */
3094 unsigned int off = o * 16 + 8;
3096 /* Write file_off into _ovly_table. */
3097 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
3102 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
3103 of 16. This should always be possible when using the standard
3104 linker scripts, but don't create overlapping segments if
3105 someone is playing games with linker scripts. */
/* Checking pass, walked backwards so `last` is the following PT_LOAD:
   -x & 15 is the pad needed to reach the next multiple of 16.  If any
   padded segment would collide with `last`, the pass is abandoned
   (presumably by breaking out with i != (unsigned int) -1).  */
3107 for (i = count; i-- != 0; )
3108 if (phdr[i].p_type == PT_LOAD)
3112 adjust = -phdr[i].p_filesz & 15;
3115 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
3118 adjust = -phdr[i].p_memsz & 15;
3121 && phdr[i].p_filesz != 0
3122 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
3123 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
3126 if (phdr[i].p_filesz != 0)
/* Application pass: only reached when the checking pass ran to
   completion (loop index wrapped to (unsigned int) -1).  */
3130 if (i == (unsigned int) -1)
3131 for (i = count; i-- != 0; )
3132 if (phdr[i].p_type == PT_LOAD)
3136 adjust = -phdr[i].p_filesz & 15;
3137 phdr[i].p_filesz += adjust;
3139 adjust = -phdr[i].p_memsz & 15;
3140 phdr[i].p_memsz += adjust;
/* Target vector and ELF backend configuration for big-endian 32-bit
   SPU; elf32-target.h expands these into the bfd_elf32_spu_vec
   target.  NOTE(review): a few #define lines are elided from this
   listing (original line numbers skip 3154 and 3164).  */
3146 #define TARGET_BIG_SYM bfd_elf32_spu_vec
3147 #define TARGET_BIG_NAME "elf32-spu"
3148 #define ELF_ARCH bfd_arch_spu
3149 #define ELF_MACHINE_CODE EM_SPU
3150 /* This matches the alignment need for DMA. */
3151 #define ELF_MAXPAGESIZE 0x80
3152 #define elf_backend_rela_normal 1
3153 #define elf_backend_can_gc_sections 1
3155 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
3156 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
3157 #define elf_info_to_howto spu_elf_info_to_howto
3158 #define elf_backend_count_relocs spu_elf_count_relocs
3159 #define elf_backend_relocate_section spu_elf_relocate_section
3160 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
3161 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
3162 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
3163 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
3165 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
3166 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
3167 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
3168 #define elf_backend_post_process_headers spu_elf_post_process_headers
3169 #define elf_backend_fake_sections spu_elf_fake_sections
3170 #define elf_backend_special_sections spu_elf_special_sections
3171 #define bfd_elf32_bfd_final_link spu_elf_final_link
3173 #include "elf32-target.h"