+#define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
+
+#define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
+#define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
+#define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
+#define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
+#define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
+#define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
+
+#define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
+#define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
+#define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
+#define AARCH64_ZR 0x1f
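+
+/* As a worked example, MADD x0, x2, x3, x4 encodes as 0x9b031040:
+   AARCH64_MAC matches it, AARCH64_OP31 extracts 0, and AARCH64_RD,
+   AARCH64_RN, AARCH64_RM and AARCH64_RA extract 0, 2, 3 and 4
+   respectively.  */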
+
+/* All ld/st ops.  See C4-182 of the ARM ARM.  The encoding space for
+   LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM also covers the prefetch
+   ops.  */
+
+#define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
+#define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
+#define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
+#define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
+#define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
+#define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
+#define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
+#define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
+#define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
+#define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
+#define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
+#define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
+#define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
+#define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
+#define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
+#define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
+#define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
+#define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
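+
+/* For example, LDR x0, [x1] in its unsigned-immediate form encodes as
+   0xf9400020: AARCH64_LDST and AARCH64_LDST_UIMM both match it, and
+   AARCH64_LD reports a load.  */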
+
+/* Classify INSN as a load/store.  Return TRUE if INSN is a load/store,
+   filling in *RT and *RTN with the first and last transfer registers,
+   *PAIR with whether two registers are transferred and *LOAD with
+   whether INSN is a load.  Return FALSE if INSN is not a load/store.
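+
+   For instance, the load pair LDP x0, x1, [sp] (encoded as 0xa94007e0)
+   yields TRUE with *RT = 0, *RTN = 1, *PAIR = TRUE and
+   *LOAD = TRUE.  */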
+
+static bfd_boolean
+aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rtn,
+ bfd_boolean *pair, bfd_boolean *load)
+{
+ uint32_t opcode;
+ unsigned int r;
+ uint32_t opc = 0;
+ uint32_t v = 0;
+ uint32_t opc_v = 0;
+
+  /* Bail out quickly if INSN doesn't fall into the load-store
+     encoding space.  */
+ if (!AARCH64_LDST (insn))
+ return FALSE;
+
+ *pair = FALSE;
+ *load = FALSE;
+ if (AARCH64_LDST_EX (insn))
+ {
+ *rt = AARCH64_RT (insn);
+ *rtn = *rt;
+ if (AARCH64_BIT (insn, 21) == 1)
+ {
+ *pair = TRUE;
+ *rtn = AARCH64_RT2 (insn);
+ }
+ *load = AARCH64_LD (insn);
+ return TRUE;
+ }
+ else if (AARCH64_LDST_NAP (insn)
+ || AARCH64_LDSTP_PI (insn)
+ || AARCH64_LDSTP_O (insn)
+ || AARCH64_LDSTP_PRE (insn))
+ {
+ *pair = TRUE;
+ *rt = AARCH64_RT (insn);
+ *rtn = AARCH64_RT2 (insn);
+ *load = AARCH64_LD (insn);
+ return TRUE;
+ }
+ else if (AARCH64_LDST_PCREL (insn)
+ || AARCH64_LDST_UI (insn)
+ || AARCH64_LDST_PIIMM (insn)
+ || AARCH64_LDST_U (insn)
+ || AARCH64_LDST_PREIMM (insn)
+ || AARCH64_LDST_RO (insn)
+ || AARCH64_LDST_UIMM (insn))
+ {
+ *rt = AARCH64_RT (insn);
+ *rtn = *rt;
+      if (AARCH64_LDST_PCREL (insn))
+        /* PC-relative accesses (LDR literal) are always loads; for
+           these, bits 22-23 hold part of the immediate, so the OPC_V
+           check below does not apply.  */
+        *load = TRUE;
+      else
+        {
+          opc = AARCH64_BITS (insn, 22, 2);
+          v = AARCH64_BIT (insn, 26);
+          opc_v = opc | (v << 2);
+          *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
+                   || opc_v == 5 || opc_v == 7);
+        }
+ return TRUE;
+ }
+ else if (AARCH64_LDST_SIMD_M (insn)
+ || AARCH64_LDST_SIMD_M_PI (insn))
+ {
+ *rt = AARCH64_RT (insn);
+ *load = AARCH64_BIT (insn, 22);
+      /* For the multiple-structure forms, bits 12-15 hold the opcode
+         that determines how many consecutive registers are
+         transferred.  */
+      opcode = (insn >> 12) & 0xf;
+      switch (opcode)
+        {
+        case 0:          /* LD4/ST4, or LD1/ST1 with four registers.  */
+        case 2:
+          *rtn = *rt + 3;
+          break;
+
+        case 4:          /* LD3/ST3, or LD1/ST1 with three registers.  */
+        case 6:
+          *rtn = *rt + 2;
+          break;
+
+        case 7:          /* LD1/ST1 with a single register.  */
+          *rtn = *rt;
+          break;
+
+        case 8:          /* LD2/ST2, or LD1/ST1 with two registers.  */
+        case 10:
+          *rtn = *rt + 1;
+          break;
+
+        default:
+          return FALSE;
+        }
+ return TRUE;
+ }
+ else if (AARCH64_LDST_SIMD_S (insn)
+ || AARCH64_LDST_SIMD_S_PI (insn))
+ {
+ *rt = AARCH64_RT (insn);
+      /* For the single-structure forms, the R bit (bit 21) and the
+         opcode in bits 13-15 together select LD1-LD4/ST1-ST4 (or a
+         replicate form) and hence the number of registers.  */
+      r = (insn >> 21) & 1;
+      *load = AARCH64_BIT (insn, 22);
+      opcode = (insn >> 13) & 0x7;
+      switch (opcode)
+        {
+        case 0:          /* LD1/ST1 (R == 0) or LD2/ST2 (R == 1).  */
+        case 2:
+        case 4:
+          *rtn = *rt + r;
+          break;
+
+        case 1:          /* LD3/ST3 (R == 0) or LD4/ST4 (R == 1).  */
+        case 3:
+        case 5:
+          *rtn = *rt + (r == 0 ? 2 : 3);
+          break;
+
+        case 6:          /* LD1R (R == 0) or LD2R (R == 1).  */
+          *rtn = *rt + r;
+          break;
+
+        case 7:          /* LD3R (R == 0) or LD4R (R == 1).  */
+          *rtn = *rt + (r == 0 ? 2 : 3);
+          break;
+
+        default:
+          return FALSE;
+        }
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/* Return TRUE if INSN is a 64-bit multiply-accumulate or
+   multiply-subtract: MADD, MSUB, SMADDL, SMSUBL, UMADDL or UMSUBL.  */
+
+static bfd_boolean
+aarch64_mlxl_p (uint32_t insn)
+{
+ uint32_t op31 = AARCH64_OP31 (insn);
+
+  if (AARCH64_MAC (insn)
+      && (op31 == 0 || op31 == 1 || op31 == 5)
+      /* Exclude MUL instructions which are encoded as a multiply-accumulate
+         with RA = XZR, e.g. MUL Xd, Xn, Xm is MADD Xd, Xn, Xm, XZR.  */
+      && AARCH64_RA (insn) != AARCH64_ZR)
+ return TRUE;
+
+ return FALSE;
+}
+
+/* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
+   it is possible for a 64-bit multiply-accumulate instruction to generate an
+   incorrect result.  The details are quite complex and hard to
+   determine statically, since branches in the code may exist in some
+   circumstances, but all cases end with a memory (load, store, or
+   prefetch) instruction followed immediately by the multiply-accumulate
+   operation.  We employ a linker patching technique: the potentially
+   affected multiply-accumulate instruction is moved into a patch region
+   and the original instruction is replaced with a branch to the patch.
+   This function checks if INSN_1 is the memory operation followed by a
+   multiply-accumulate operation (INSN_2).  Return TRUE if an erratum
+   sequence is found, FALSE if INSN_1 and INSN_2 are safe.
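+
+   For instance (registers chosen purely for illustration), the sequence
+
+     ldr  x10, [x1]          // memory operation (INSN_1)
+     madd x0, x2, x3, x4     // multiply-accumulate (INSN_2)
+
+   carries no register dependency and is reported as an erratum
+   sequence, whereas loading into x2 instead would create a true (RAW)
+   dependency on the MADD and the pair would be reported safe.  */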
+
+static bfd_boolean
+aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
+{
+  unsigned int rt;
+  unsigned int rtn;
+ uint32_t rn;
+ uint32_t rm;
+ uint32_t ra;
+ bfd_boolean pair;
+ bfd_boolean load;
+
+ if (aarch64_mlxl_p (insn_2)
+ && aarch64_mem_op_p (insn_1, &rt, &rtn, &pair, &load))
+ {
+      /* A SIMD memory op transfers vector registers, so it cannot have
+         a register dependency on the subsequent integer MLA; by
+         definition of the erratum such a sequence must be flagged.  */
+      if (AARCH64_BIT (insn_1, 26))
+        return TRUE;
+
+      /* Otherwise look for a register dependency between the integer
+         memory op and the MLA.  */
+ rn = AARCH64_RN (insn_2);
+ ra = AARCH64_RA (insn_2);
+ rm = AARCH64_RM (insn_2);
+
+      /* If this is a load and there's a true (RAW) dependency, we are safe
+         and this is not an erratum sequence.  */
+      if (load
+          && (rt == rn || rt == rm || rt == ra
+              || (pair && (rtn == rn || rtn == rm || rtn == ra))))
+        return FALSE;
+
+      /* Conservatively emit a stub for every other case (including
+         write-backs).  */
+      return TRUE;
+ }
+
+ return FALSE;
+}
+
+/* Used to order a list of mapping symbols by address. */
+
+static int
+elf_aarch64_compare_mapping (const void *a, const void *b)
+{
+ const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
+ const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
+
+ if (amap->vma > bmap->vma)
+ return 1;
+ else if (amap->vma < bmap->vma)
+ return -1;
+ else if (amap->type > bmap->type)
+ /* Ensure results do not depend on the host qsort for objects with
+ multiple mapping symbols at the same address by sorting on type
+ after vma. */
+ return 1;
+ else if (amap->type < bmap->type)
+ return -1;
+ else
+ return 0;
+}
+
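+/* Scan INPUT_BFD for erratum 835769 sequences, recording one fix table
+   entry per sequence found.  *FIXES_P, *NUM_FIXES_P and
+   *FIX_TABLE_SIZE_P are updated as the table grows.  Return TRUE on
+   failure (such as running out of memory), FALSE otherwise.  */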
+static bfd_boolean
+erratum_835769_scan (bfd *input_bfd,
+ struct bfd_link_info *info,
+ struct aarch64_erratum_835769_fix **fixes_p,
+ unsigned int *num_fixes_p,
+ unsigned int *fix_table_size_p)
+{
+ asection *section;
+ struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
+ struct aarch64_erratum_835769_fix *fixes = *fixes_p;
+ unsigned int num_fixes = *num_fixes_p;
+ unsigned int fix_table_size = *fix_table_size_p;
+
+  if (htab == NULL)
+    return TRUE;
+
+ for (section = input_bfd->sections;
+ section != NULL;
+ section = section->next)
+ {
+ bfd_byte *contents = NULL;
+ struct _aarch64_elf_section_data *sec_data;
+ unsigned int span;
+
+ if (elf_section_type (section) != SHT_PROGBITS
+ || (elf_section_flags (section) & SHF_EXECINSTR) == 0
+ || (section->flags & SEC_EXCLUDE) != 0
+ || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
+ || (section->output_section == bfd_abs_section_ptr))
+ continue;
+
+ if (elf_section_data (section)->this_hdr.contents != NULL)
+ contents = elf_section_data (section)->this_hdr.contents;
+ else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
+ return TRUE;
+
+ sec_data = elf_aarch64_section_data (section);
+
+ qsort (sec_data->map, sec_data->mapcount,
+ sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
+
+ for (span = 0; span < sec_data->mapcount; span++)
+ {
+ unsigned int span_start = sec_data->map[span].vma;
+          unsigned int span_end = ((span == sec_data->mapcount - 1)
+                                   ? section->size
+                                   : sec_data->map[span + 1].vma);
+ unsigned int i;
+ char span_type = sec_data->map[span].type;
+
+          /* Only code ($x) spans can contain the erratum sequence;
+             skip data ($d) spans.  */
+          if (span_type == 'd')
+            continue;
+
+ for (i = span_start; i + 4 < span_end; i += 4)
+ {
+ uint32_t insn_1 = bfd_getl32 (contents + i);
+ uint32_t insn_2 = bfd_getl32 (contents + i + 4);
+
+ if (aarch64_erratum_sequence (insn_1, insn_2))
+ {
+                  char *stub_name;
+
+                  stub_name = (char *) bfd_malloc
+                    (strlen ("__erratum_835769_veneer_") + 16);
+                  if (stub_name == NULL)
+                    return TRUE;
+                  sprintf (stub_name, "__erratum_835769_veneer_%u",
+                           num_fixes);
+
+ if (num_fixes == fix_table_size)
+ {
+ fix_table_size *= 2;
+ fixes =
+ (struct aarch64_erratum_835769_fix *)
+ bfd_realloc (fixes,
+ sizeof (struct aarch64_erratum_835769_fix)
+ * fix_table_size);
+ if (fixes == NULL)
+ return TRUE;
+ }
+
+ fixes[num_fixes].input_bfd = input_bfd;
+ fixes[num_fixes].section = section;
+ fixes[num_fixes].offset = i + 4;
+ fixes[num_fixes].veneered_insn = insn_2;
+ fixes[num_fixes].stub_name = stub_name;
+ fixes[num_fixes].stub_type = aarch64_stub_erratum_835769_veneer;
+ num_fixes++;
+ }
+ }
+ }
+ if (elf_section_data (section)->this_hdr.contents == NULL)
+ free (contents);
+ }
+
+ *fixes_p = fixes;
+ *num_fixes_p = num_fixes;
+ *fix_table_size_p = fix_table_size;
+ return FALSE;
+}
+
+/* Find or create a stub section.  Returns a pointer to the stub section, and
+   the section to which the stub section will be attached (in *LINK_SEC_P).
+   LINK_SEC_P may be NULL.  The stub section is named after the section it
+   is attached to, with STUB_SUFFIX appended.  */
+
+static asection *
+elf_aarch64_create_or_find_stub_sec (asection **link_sec_p, asection *section,
+ struct elf_aarch64_link_hash_table *htab)
+{
+ asection *link_sec;
+ asection *stub_sec;
+
+ link_sec = htab->stub_group[section->id].link_sec;
+ BFD_ASSERT (link_sec != NULL);
+ stub_sec = htab->stub_group[section->id].stub_sec;
+
+ if (stub_sec == NULL)
+ {
+ stub_sec = htab->stub_group[link_sec->id].stub_sec;
+ if (stub_sec == NULL)
+ {
+ size_t namelen;
+ bfd_size_type len;
+ char *s_name;
+
+ namelen = strlen (link_sec->name);
+ len = namelen + sizeof (STUB_SUFFIX);
+ s_name = (char *) bfd_alloc (htab->stub_bfd, len);
+ if (s_name == NULL)
+ return NULL;
+
+ memcpy (s_name, link_sec->name, namelen);
+ memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
+ stub_sec = (*htab->add_stub_section) (s_name, link_sec);
+
+ if (stub_sec == NULL)
+ return NULL;
+ htab->stub_group[link_sec->id].stub_sec = stub_sec;
+ }
+ htab->stub_group[section->id].stub_sec = stub_sec;
+ }
+
+ if (link_sec_p)
+ *link_sec_p = link_sec;
+
+ return stub_sec;
+}
+