#define STUB_ENTRY_NAME "__%s_veneer"
+#define CMSE_PREFIX "__acle_se_"
+
/* The name of the dynamic interpreter. This is put in the .interp
section. */
#define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
};
+/* Stub used for transition to secure state (aka SG veneer): an SG
+   instruction followed by a 32-bit Thumb branch whose destination is
+   relocated to the original branch target, i.e. the secure entry
+   function this veneer fronts.  */
+static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
+{
+  THUMB32_INSN (0xe97fe97f),		/* sg.  */
+  THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
+};
+
/* Cortex-A8 erratum-workaround stubs. */
DEF_STUB(long_branch_v4t_thumb_tls_pic) \
DEF_STUB(long_branch_arm_nacl) \
DEF_STUB(long_branch_arm_nacl_pic) \
+ DEF_STUB(cmse_branch_thumb_only) \
DEF_STUB(a8_veneer_b_cond) \
DEF_STUB(a8_veneer_b) \
DEF_STUB(a8_veneer_bl) \
information on stub grouping. */
struct map_stub *stub_group;
+ /* Input stub section holding secure gateway veneers. */
+ asection *cmse_stub_sec;
+
/* Number of elements in stub_group. */
unsigned int top_id;
union and *ARM_PLT at the ARM-specific information. */
static bfd_boolean
-elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
+elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
+ struct elf32_arm_link_hash_entry *h,
unsigned long r_symndx, union gotplt_union **root_plt,
struct arm_plt_info **arm_plt)
{
struct arm_local_iplt_info *local_iplt;
+ if (globals->root.splt == NULL && globals->root.iplt == NULL)
+ return FALSE;
+
if (h != NULL)
{
*root_plt = &h->root.plt;
case arm_stub_long_branch_v4t_thumb_arm_pic:
case arm_stub_long_branch_v4t_thumb_tls_pic:
case arm_stub_long_branch_thumb_only_pic:
+ case arm_stub_cmse_branch_thumb_only:
return TRUE;
case arm_stub_none:
BFD_FAIL ();
the address of the appropriate trampoline. */
if (r_type != R_ARM_TLS_CALL
&& r_type != R_ARM_THM_TLS_CALL
- && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
- &root_plt, &arm_plt)
+ && elf32_arm_get_plt_info (input_bfd, globals, hash,
+ ELF32_R_SYM (rel->r_info), &root_plt,
+ &arm_plt)
&& root_plt->offset != (bfd_vma) -1)
{
asection *splt;
if (stub_type >= max_stub_type)
abort (); /* Should be unreachable. */
- return FALSE;
+ switch (stub_type)
+ {
+ case arm_stub_cmse_branch_thumb_only:
+ return TRUE;
+
+ default:
+ return FALSE;
+ }
+
+ abort (); /* Should be unreachable. */
}
/* Required alignment (as a power of 2) for the dedicated section holding
if (stub_type >= max_stub_type)
abort (); /* Should be unreachable. */
- BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
- return 0;
+ switch (stub_type)
+ {
+      /* Vectors of Secure Gateway veneers must be aligned on a 32-byte
+	 boundary.  */
+ case arm_stub_cmse_branch_thumb_only:
+ return 5;
+
+ default:
+ BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+ return 0;
+ }
+
+ abort (); /* Should be unreachable. */
}
/* Name of the dedicated output section to put veneers of type STUB_TYPE, or
if (stub_type >= max_stub_type)
abort (); /* Should be unreachable. */
- BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
- return NULL;
+ switch (stub_type)
+ {
+ case arm_stub_cmse_branch_thumb_only:
+ return ".gnu.sgstubs";
+
+ default:
+ BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+ return NULL;
+ }
+
+ abort (); /* Should be unreachable. */
}
/* If veneers of type STUB_TYPE should go in a dedicated output section,
corresponding input section. Otherwise, returns NULL. */
static asection **
-arm_dedicated_stub_input_section_ptr
- (struct elf32_arm_link_hash_table *htab ATTRIBUTE_UNUSED,
- enum elf32_arm_stub_type stub_type)
+arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
+ enum elf32_arm_stub_type stub_type)
{
if (stub_type >= max_stub_type)
abort (); /* Should be unreachable. */
- BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
- return NULL;
+ switch (stub_type)
+ {
+ case arm_stub_cmse_branch_thumb_only:
+ return &htab->cmse_stub_sec;
+
+ default:
+ BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+ return NULL;
+ }
+
+ abort (); /* Should be unreachable. */
}
/* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
case arm_stub_long_branch_thumb_only_pic:
case arm_stub_long_branch_any_tls_pic:
case arm_stub_long_branch_v4t_thumb_tls_pic:
+ case arm_stub_cmse_branch_thumb_only:
case arm_stub_a8_veneer_blx:
return 4;
if (stub_type >= max_stub_type)
abort (); /* Should be unreachable. */
- return FALSE;
+ switch (stub_type)
+ {
+ case arm_stub_cmse_branch_thumb_only:
+ return TRUE;
+
+ default:
+ return FALSE;
+ }
+
+ abort (); /* Should be unreachable. */
}
/* Returns the padding needed for the dedicated section used stubs of type
if (stub_type >= max_stub_type)
abort (); /* Should be unreachable. */
- return 0;
+ switch (stub_type)
+ {
+ case arm_stub_cmse_branch_thumb_only:
+ return 32;
+
+ default:
+ return 0;
+ }
+
+ abort (); /* Should be unreachable. */
}
static bfd_boolean
return TRUE;
}
+/* Scan symbols in INPUT_BFD to identify secure entry functions needing a
+   gateway veneer to transition from non-secure to secure state and create
+   them accordingly.
+
+   "ARMv8-M Security Extensions: Requirements on Development Tools" document
+   defines the conditions that govern Secure Gateway veneer creation for a
+   given symbol <SYM> as follows:
+   - it has function type
+   - it has non local binding
+   - a symbol named __acle_se_<SYM> (called special symbol) exists with the
+     same type, binding and value as <SYM> (called normal symbol).
+   An entry function can handle secure state transition itself in which case
+   its special symbol would have a different value from the normal symbol.
+
+   OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
+   entry mapping while HTAB gives the name to hash entry mapping.
+
+   If any secure gateway veneer is created, *STUB_CHANGED is set to TRUE.
+   Returns FALSE if any problem was found (invalid special or standard
+   symbol, failed local symbol read, failed stub creation), TRUE
+   otherwise.  */
+
+static bfd_boolean
+cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
+	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
+	   bfd_boolean *stub_changed)
+{
+  const struct elf_backend_data *bed;
+  Elf_Internal_Shdr *symtab_hdr;
+  unsigned i, j, sym_count, ext_start;
+  Elf_Internal_Sym *cmse_sym, *local_syms;
+  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
+  enum arm_st_branch_type branch_type;
+  char *sym_name, *lsym_name;
+  bfd_vma sym_value;
+  asection *section;
+  bfd_boolean is_v8m, new_stub, created_stub, cmse_invalid, ret = TRUE;
+
+  bed = get_elf_backend_data (input_bfd);
+  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+  /* sh_info is the index of the first non-local symbol, so local symbols
+     occupy indices [0, ext_start) and globals [ext_start, sym_count).  */
+  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
+  ext_start = symtab_hdr->sh_info;
+  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
+	    && out_attr[Tag_CPU_arch_profile].i == 'M');
+
+  /* Read this BFD's local symbols, reusing the cached copy when one
+     exists; a freshly-read buffer is freed at the end of this function.  */
+  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
+  if (local_syms == NULL)
+    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
+				       symtab_hdr->sh_info, 0, NULL, NULL,
+				       NULL);
+  if (symtab_hdr->sh_info && local_syms == NULL)
+    return FALSE;
+
+  /* Scan symbols.  */
+  for (i = 0; i < sym_count; i++)
+    {
+      cmse_invalid = FALSE;
+
+      if (i < ext_start)
+	{
+	  cmse_sym = &local_syms[i];
+	  /* Not a special symbol.  */
+	  if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym->st_target_internal))
+	    continue;
+	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
+						      symtab_hdr->sh_link,
+						      cmse_sym->st_name);
+	  /* Special symbol with local binding.  */
+	  cmse_invalid = TRUE;
+	}
+      else
+	{
+	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
+	  sym_name = (char *) cmse_hash->root.root.root.string;
+
+	  /* Not a special symbol.  */
+	  if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
+	    continue;
+
+	  /* Special symbol has incorrect binding or type.  */
+	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
+	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
+	      || cmse_hash->root.type != STT_FUNC)
+	    cmse_invalid = TRUE;
+	}
+
+      if (!is_v8m)
+	{
+	  (*_bfd_error_handler) (_("%B: Special symbol `%s' only allowed for "
+				   "ARMv8-M architecture or later."),
+				 input_bfd, sym_name);
+	  is_v8m = TRUE; /* Avoid multiple warning.  */
+	  ret = FALSE;
+	}
+
+      if (cmse_invalid)
+	{
+	  (*_bfd_error_handler) (_("%B: invalid special symbol `%s'."),
+				 input_bfd, sym_name);
+	  (*_bfd_error_handler) (_("It must be a global or weak function "
+				   "symbol."));
+	  ret = FALSE;
+	  /* Local special symbols have no hash entry to continue the checks
+	     below with, so move on to the next symbol.  */
+	  if (i < ext_start)
+	    continue;
+	}
+
+      /* Strip the __acle_se_ prefix to look up the normal symbol <SYM>.  */
+      sym_name += strlen (CMSE_PREFIX);
+      hash = (struct elf32_arm_link_hash_entry *)
+	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
+
+      /* No associated normal symbol or it is neither global nor weak.  */
+      if (!hash
+	  || (hash->root.root.type != bfd_link_hash_defined
+	      && hash->root.root.type != bfd_link_hash_defweak)
+	  || hash->root.type != STT_FUNC)
+	{
+	  /* Initialize here to avoid warning about use of possibly
+	     uninitialized variable.  */
+	  j = 0;
+
+	  if (!hash)
+	    {
+	      /* Searching for a normal symbol with local binding.  */
+	      for (; j < ext_start; j++)
+		{
+		  lsym_name =
+		    bfd_elf_string_from_elf_section (input_bfd,
+						     symtab_hdr->sh_link,
+						     local_syms[j].st_name);
+		  if (!strcmp (sym_name, lsym_name))
+		    break;
+		}
+	    }
+
+	  if (hash || j < ext_start)
+	    {
+	      (*_bfd_error_handler)
+		(_("%B: invalid standard symbol `%s'."), input_bfd, sym_name);
+	      (*_bfd_error_handler)
+		(_("It must be a global or weak function symbol."));
+	    }
+	  else
+	    (*_bfd_error_handler)
+	      (_("%B: absent standard symbol `%s'."), input_bfd, sym_name);
+	  ret = FALSE;
+	  if (!hash)
+	    continue;
+	}
+
+      sym_value = hash->root.root.u.def.value;
+      section = hash->root.root.u.def.section;
+
+      if (cmse_hash->root.root.u.def.section != section)
+	{
+	  (*_bfd_error_handler)
+	    (_("%B: `%s' and its special symbol are in different sections."),
+	     input_bfd, sym_name);
+	  ret = FALSE;
+	}
+      if (cmse_hash->root.root.u.def.value != sym_value)
+	continue; /* Ignore: could be an entry function starting with SG.  */
+
+      /* If this section is a link-once section that will be discarded, then
+	 don't create any stubs.  */
+      if (section->output_section == NULL)
+	{
+	  (*_bfd_error_handler)
+	    (_("%B: entry function `%s' not output."), input_bfd, sym_name);
+	  continue;
+	}
+
+      if (hash->root.size == 0)
+	{
+	  (*_bfd_error_handler)
+	    (_("%B: entry function `%s' is empty."), input_bfd, sym_name);
+	  ret = FALSE;
+	}
+
+      /* NOTE(review): once RET has gone FALSE, stub creation is skipped for
+	 this and every remaining entry function — confirm this latching
+	 behavior is intended rather than a per-symbol check.  */
+      if (!ret)
+	continue;
+      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
+      created_stub
+	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
+				 NULL, NULL, section, hash, sym_name,
+				 sym_value, branch_type, &new_stub);
+
+      if (!created_stub)
+	ret = FALSE;
+      else
+	{
+	  BFD_ASSERT (new_stub);
+	  *stub_changed = TRUE;
+	}
+    }
+
+  /* Only free the symbol buffer if we read it ourselves above; a cached
+     copy (symtab_hdr->contents) is owned by the BFD.  */
+  if (!symtab_hdr->contents)
+    free (local_syms);
+  return ret;
+}
+
/* Determine and set the size of the stub section for a final link.
The basic idea here is to examine all the relocations looking for
unsigned int),
void (*layout_sections_again) (void))
{
+ obj_attribute *out_attr;
bfd_size_type stub_group_size;
- bfd_boolean stubs_always_after_branch;
+ bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
struct a8_erratum_fix *a8_fixes = NULL;
unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
htab->layout_sections_again = layout_sections_again;
stubs_always_after_branch = group_size < 0;
+ out_attr = elf_known_obj_attributes_proc (output_bfd);
+ m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
/* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
as the first half of a 32-bit branch straddling two 4K pages. This is a
crude way of enforcing that. */
if (symtab_hdr->sh_info == 0)
continue;
+ /* Limit scan of symbols to object file whose profile is
+ Microcontroller to not hinder performance in the general case. */
+ if (m_profile && first_veneer_scan)
+ {
+ struct elf_link_hash_entry **sym_hashes;
+
+ sym_hashes = elf_sym_hashes (input_bfd);
+ if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
+ &stub_changed))
+ goto error_ret_free_local;
+ }
+
/* Walk over each section attached to the input bfd. */
for (section = input_bfd->sections;
section != NULL;
/* Ask the linker to do its stuff. */
(*htab->layout_sections_again) ();
+ first_veneer_scan = FALSE;
}
/* Add stubs for Cortex-A8 erratum fixes now. */
/* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
VALUE appropriately for relocations that we resolve at link time. */
has_iplt_entry = FALSE;
- if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
+ if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
+ &arm_plt)
&& root_plt->offset != (bfd_vma) -1)
{
plt_offset = root_plt->offset;
}
if (may_need_local_target_p
- && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
+ && elf32_arm_get_plt_info (abfd, globals, eh, r_symndx, &root_plt,
+ &arm_plt))
{
/* If PLT refcount book-keeping is wrong and too low, we'll
see a zero value (going to -1) for the root PLT reference
}
/* Unwinding tables are not referenced directly. This pass marks them as
- required if the corresponding code section is marked. */
+ required if the corresponding code section is marked. Similarly, ARMv8-M
+ secure entry functions can only be referenced by SG veneers which are
+ created after the GC process. They need to be marked in case they reside in
+ their own section (as would be the case if code was compiled with
+ -ffunction-sections). */
static bfd_boolean
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
{
bfd *sub;
Elf_Internal_Shdr **elf_shdrp;
- bfd_boolean again;
+ asection *cmse_sec;
+ obj_attribute *out_attr;
+ Elf_Internal_Shdr *symtab_hdr;
+ unsigned i, sym_count, ext_start;
+ const struct elf_backend_data *bed;
+ struct elf_link_hash_entry **sym_hashes;
+ struct elf32_arm_link_hash_entry *cmse_hash;
+ bfd_boolean again, is_v8m, first_bfd_browse = TRUE;
_bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
+ out_attr = elf_known_obj_attributes_proc (info->output_bfd);
+ is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
+ && out_attr[Tag_CPU_arch_profile].i == 'M';
+
/* Marking EH data may cause additional code sections to be marked,
requiring multiple passes. */
again = TRUE;
return FALSE;
}
}
+
+ /* Mark section holding ARMv8-M secure entry functions. We mark all
+ of them so no need for a second browsing. */
+ if (is_v8m && first_bfd_browse)
+ {
+ sym_hashes = elf_sym_hashes (sub);
+ bed = get_elf_backend_data (sub);
+ symtab_hdr = &elf_tdata (sub)->symtab_hdr;
+ sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
+ ext_start = symtab_hdr->sh_info;
+
+ /* Scan symbols. */
+ for (i = ext_start; i < sym_count; i++)
+ {
+ cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
+
+ /* Assume it is a special symbol. If not, cmse_scan will
+ warn about it and user can do something about it. */
+ if (ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
+ {
+ cmse_sec = cmse_hash->root.root.u.def.section;
+ if (!_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
+ return FALSE;
+ }
+ }
+ }
}
+ first_bfd_browse = FALSE;
}
return TRUE;
const void *pshn,
Elf_Internal_Sym *dst)
{
+ Elf_Internal_Shdr *symtab_hdr;
+ const char *name = NULL;
+
if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
return FALSE;
dst->st_target_internal = 0;
else
ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
+ /* Mark CMSE special symbols. */
+ symtab_hdr = & elf_symtab_hdr (abfd);
+ if (symtab_hdr->sh_size)
+ name = bfd_elf_sym_name (abfd, symtab_hdr, dst, NULL);
+ if (name && CONST_STRNEQ (name, CMSE_PREFIX))
+ ARM_SET_SYM_CMSE_SPCL (dst->st_target_internal);
+
return TRUE;
}