/* tc-arm.c -- Assemble for the ARM
- Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
- 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
- Free Software Foundation, Inc.
+ Copyright 1994-2013 Free Software Foundation, Inc.
Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
Modified by David Taylor (dtaylor@armltd.co.uk)
Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
ARM_FEATURE (0, FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
ARM_FEATURE (0, FPU_CRYPTO_EXT_ARMV8);
+static const arm_feature_set crc_ext_armv8 =
+ ARM_FEATURE (0, CRC_EXT_ARMV8);
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes. */
static bfd_boolean unified_syntax = FALSE;
+/* An immediate operand can start with #, and ld*, st*, pld operands
+ can contain [ and ]. We need to tell APP not to elide whitespace
+ before a [, which can appear as the first operand for pld. */
+const char arm_symbol_chars[] = "#[]";
+
enum neon_el_type
{
NT_invtype,
#define BAD_PC_WRITEBACK \
_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE _("branch out of range")
+#define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static inline int
skip_past_char (char ** str, char c)
{
+ /* PR gas/14987: Allow for whitespace before the expected character. */
+ skip_whitespace (*str);
+
if (**str == c)
{
(*str)++;
static void
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
{
- int tag = s_vendor_attribute (OBJ_ATTR_PROC);
+ int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
attributes_set_explicitly[tag] = 1;
return PARSE_OPERAND_SUCCESS;
}
+ /* PR gas/14887: Allow for whitespace after the opening bracket. */
+ skip_whitespace (p);
+
if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
{
inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
do \
{ \
val = parse_barrier (&str); \
- if (val == FAIL) \
+ if (val == FAIL && ! ISALPHA (*str)) \
+ goto immediate; \
+ if (val == FAIL \
+ /* ISB can only take SY as an option. */ \
+ || ((inst.instruction & 0xf0) == 0x60 \
+ && val != 0xf)) \
{ \
- if (ISALPHA (*str)) \
- goto failure; \
- else \
- goto immediate; \
- } \
- else \
- { \
- if ((inst.instruction & 0xf0) == 0x60 \
- && val != 0xf) \
- { \
- /* ISB can only take SY as an option. */ \
- inst.error = _("invalid barrier type"); \
- goto failure; \
- } \
+ inst.error = _("invalid barrier type"); \
+ backtrack_pos = 0; \
+ goto failure; \
} \
} \
while (0)
if (inst.operands[i].immisreg)
{
constraint ((inst.operands[i].imm == REG_PC
- || inst.operands[i].reg == REG_PC),
+ || (is_t && inst.operands[i].reg == REG_PC)),
BAD_PC_ADDRESSING);
+ constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
+ BAD_PC_WRITEBACK);
inst.instruction |= inst.operands[i].imm;
if (!inst.operands[i].negative)
inst.instruction |= INDEX_UP;
}
/* Encode a two-register form with operand 0 in the Rm field
   (bits 0-3) and operand 1 in the Rn field (bits 16-19).  */
static void
+do_rm_rn (void)
+{
+  inst.instruction |= inst.operands[0].reg;
+  inst.instruction |= inst.operands[1].reg << 16;
+}
+
+static void
do_rd_rn (void)
{
inst.instruction |= inst.operands[0].reg << 12;
/* Encode the option field of a barrier instruction (DSB/DMB/ISB):
   use the parsed barrier option when one is present, otherwise
   default to SY (0xf).  The deleted constraint was dead code: imm
   can never be both > 0xf and < 0, so it could not trigger.  */
do_barrier (void)
{
  if (inst.operands[0].present)
-    {
-      constraint ((inst.instruction & 0xf0) != 0x40
-		  && inst.operands[0].imm > 0xf
-		  && inst.operands[0].imm < 0x0,
-		  _("bad barrier type"));
-      inst.instruction |= inst.operands[0].imm;
-    }
+    inst.instruction |= inst.operands[0].imm;
  else
    inst.instruction |= 0xf;
}
&& inst.operands[4].reg == r->crm
&& inst.operands[5].imm == r->opc2)
{
- if (!check_obsolete (&r->obsoleted, r->obs_msg)
+ if (! ARM_CPU_IS_ANY (cpu_variant)
&& warn_on_deprecated
&& ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
as_warn ("%s", r->dep_msg);
{
unsigned Rt = inst.operands[0].reg;
- if (thumb_mode && inst.operands[0].reg == REG_SP)
+ if (thumb_mode && Rt == REG_SP)
{
inst.error = BAD_SP;
return;
}
/* APSR_ sets isvec. All other refs to PC are illegal. */
- if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
+ if (!inst.operands[0].isvec && Rt == REG_PC)
{
inst.error = BAD_PC;
return;
}
- switch (inst.operands[1].reg)
- {
- case 0: /* FPSID */
- case 1: /* FPSCR */
- case 6: /* MVFR1 */
- case 7: /* MVFR0 */
- case 8: /* FPEXC */
- inst.instruction |= (inst.operands[1].reg << 16);
- break;
- default:
- first_error (_("operand 1 must be a VFP extension System Register"));
- }
-
+ /* If we get through parsing the register name, we just insert the number
+ generated into the instruction without further validation. */
+ inst.instruction |= (inst.operands[1].reg << 16);
inst.instruction |= (Rt << 12);
}
return;
}
- switch (inst.operands[0].reg)
- {
- case 0: /* FPSID */
- case 1: /* FPSCR */
- case 8: /* FPEXC */
- inst.instruction |= (inst.operands[0].reg << 16);
- break;
- default:
- first_error (_("operand 0 must be FPSID or FPSCR pr FPEXC"));
- }
-
+ /* If we get through parsing the register name, we just insert the number
+ generated into the instruction without further validation. */
+ inst.instruction |= (inst.operands[0].reg << 16);
inst.instruction |= (Rt << 12);
}
inst.instruction |= inst.operands[3].reg << 16;
}
+/* ARM V8 STRL.  */
/* ARM encoding of the store-release exclusive family: the status
   register (operand 0) must not overlap the transfer register
   (operand 1) or the base register (operand 2).  */
+static void
+do_stlex (void)
+{
+  constraint (inst.operands[0].reg == inst.operands[1].reg
+	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
+
+  do_rd_rm_rn ();
+}
+
/* Thumb encoding of the store-release exclusive family; same
   overlap constraint as do_stlex, different field layout
   (Rm/Rd/Rn order).  */
+static void
+do_t_stlex (void)
+{
+  constraint (inst.operands[0].reg == inst.operands[1].reg
+	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
+
+  do_rm_rd_rn ();
+}
+
/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
extends it to 32-bits, and adds the result to a value in another
register. You can specify a rotation by 0, 8, 16, or 24 bits
constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
constraint (is_t && inst.operands[i].writeback,
_("cannot use writeback with this instruction"));
- constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0)
- && !inst.reloc.pc_rel, BAD_PC_ADDRESSING);
+ constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
+ BAD_PC_ADDRESSING);
if (is_d)
{
}
static void
-do_t_barrier (void)
-{
- if (inst.operands[0].present)
- {
- constraint ((inst.instruction & 0xf0) != 0x40
- && inst.operands[0].imm > 0xf
- && inst.operands[0].imm < 0x0,
- _("bad barrier type"));
- inst.instruction |= inst.operands[0].imm;
- }
- else
- inst.instruction |= 0xf;
-}
-
-static void
do_t_bfc (void)
{
unsigned Rd;
}
/* Actually do the work for Thumb state bkpt and hlt. The only difference
- between the two is the maximum immediate allowed - which is passed in
+ between the two is the maximum immediate allowed - which is passed in
RANGE. */
static void
do_t_bkpt_hlt1 (int range)
X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
- X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
+ X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
+ X(vseleq, 0xe000a00, N_INV, N_INV), \
+ X(vselvs, 0xe100a00, N_INV, N_INV), \
+ X(vselge, 0xe200a00, N_INV, N_INV), \
+ X(vselgt, 0xe300a00, N_INV, N_INV), \
+ X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
+ X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
+ X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
+ X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
+ X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
+ X(aes, 0x3b00300, N_INV, N_INV), \
+ X(sha3op, 0x2000c00, N_INV, N_INV), \
+ X(sha1h, 0x3b902c0, N_INV, N_INV), \
+ X(sha2op, 0x3ba0380, N_INV, N_INV)
enum neon_opc
{
((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
+#define NEON_ENC_FPV8_(X) \
+ ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
#define NEON_ENCODE(type, inst) \
do \
N_F16 = 0x0040000,
N_F32 = 0x0080000,
N_F64 = 0x0100000,
+ N_P64 = 0x0200000,
N_KEY = 0x1000000, /* Key element (main type specifier). */
N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
+ N_UNT = 0x8000000, /* Must be explicitly untyped. */
N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
N_UTYP = 0,
- N_MAX_NONSPECIAL = N_F64
+ N_MAX_NONSPECIAL = N_P64
};
#define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
{
case 8: return N_P8;
case 16: return N_P16;
+ case 64: return N_P64;
default: ;
}
break;
if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
*size = 8;
- else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
+ else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
*size = 16;
else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
*size = 32;
- else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
+ else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
*size = 64;
else
return FAIL;
*type = NT_integer;
else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
*type = NT_untyped;
- else if ((mask & (N_P8 | N_P16)) != 0)
+ else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
*type = NT_poly;
- else if ((mask & (N_F32 | N_F64)) != 0)
+ else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
*type = NT_float;
else
return FAIL;
/* If only untyped args are allowed, decay any more specific types to
them. Some instructions only care about signs for some element
sizes, so handle that properly. */
- if ((g_size == 8 && (types_allowed & N_8) != 0)
- || (g_size == 16 && (types_allowed & N_16) != 0)
- || (g_size == 32 && (types_allowed & N_32) != 0)
- || (g_size == 64 && (types_allowed & N_64) != 0))
+ if (((types_allowed & N_UNT) == 0)
+ && ((g_size == 8 && (types_allowed & N_8) != 0)
+ || (g_size == 16 && (types_allowed & N_16) != 0)
+ || (g_size == 32 && (types_allowed & N_32) != 0)
+ || (g_size == 64 && (types_allowed & N_64) != 0)))
g_type = NT_untyped;
if (pass == 0)
enum vfp_or_neon_is_neon_bits
{
NEON_CHECK_CC = 1,
- NEON_CHECK_ARCH = 2
+ NEON_CHECK_ARCH = 2,
+ NEON_CHECK_ARCH8 = 4
};
/* Call this function if an instruction which may have belonged to the VFP or
}
if ((check & NEON_CHECK_ARCH)
- && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
+ && !mark_feature_used (&fpu_neon_ext_v1))
+ {
+ first_error (_(BAD_FPU));
+ return FAIL;
+ }
+
+ if ((check & NEON_CHECK_ARCH8)
+ && !mark_feature_used (&fpu_neon_ext_armv8))
{
first_error (_(BAD_FPU));
return FAIL;
/* Check the various types for the VCVT instruction, and return which version
the current instruction is. */
-static int
-neon_cvt_flavour (enum neon_shape rs)
+#define CVT_FLAVOUR_VAR \
+ CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
+ CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
+ CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
+ CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
+ /* Half-precision conversions. */ \
+ CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
+ CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
+ /* VFP instructions. */ \
+ CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
+ CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
+ CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
+ CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
+ CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
+ CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
+ /* VFP instructions with bitshift. */ \
+ CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
+ CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
+ CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
+ CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
+ CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
+ CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
+ CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
+ CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
+
+#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
+ neon_cvt_flavour_##C,
+
+/* The different types of conversions we can do. */
+enum neon_cvt_flavour
+{
+ CVT_FLAVOUR_VAR
+ neon_cvt_flavour_invalid,
+ neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
+};
+
+#undef CVT_VAR
+
+static enum neon_cvt_flavour
+get_neon_cvt_flavour (enum neon_shape rs)
{
-#define CVT_VAR(C,X,Y) \
- et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
- if (et.type != NT_invtype) \
- { \
- inst.error = NULL; \
- return (C); \
+#define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
+ et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
+ if (et.type != NT_invtype) \
+ { \
+ inst.error = NULL; \
+ return (neon_cvt_flavour_##C); \
}
+
struct neon_type_el et;
unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
|| rs == NS_FF) ? N_VFP : 0;
here by making the size equal to the key (wider, in this case) operand. */
unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
- CVT_VAR (0, N_S32, N_F32);
- CVT_VAR (1, N_U32, N_F32);
- CVT_VAR (2, N_F32, N_S32);
- CVT_VAR (3, N_F32, N_U32);
- /* Half-precision conversions. */
- CVT_VAR (4, N_F32, N_F16);
- CVT_VAR (5, N_F16, N_F32);
-
- whole_reg = N_VFP;
-
- /* VFP instructions. */
- CVT_VAR (6, N_F32, N_F64);
- CVT_VAR (7, N_F64, N_F32);
- CVT_VAR (8, N_S32, N_F64 | key);
- CVT_VAR (9, N_U32, N_F64 | key);
- CVT_VAR (10, N_F64 | key, N_S32);
- CVT_VAR (11, N_F64 | key, N_U32);
- /* VFP instructions with bitshift. */
- CVT_VAR (12, N_F32 | key, N_S16);
- CVT_VAR (13, N_F32 | key, N_U16);
- CVT_VAR (14, N_F64 | key, N_S16);
- CVT_VAR (15, N_F64 | key, N_U16);
- CVT_VAR (16, N_S16, N_F32 | key);
- CVT_VAR (17, N_U16, N_F32 | key);
- CVT_VAR (18, N_S16, N_F64 | key);
- CVT_VAR (19, N_U16, N_F64 | key);
+ CVT_FLAVOUR_VAR;
- return -1;
+ return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
+enum neon_cvt_mode
+{
+ neon_cvt_mode_a,
+ neon_cvt_mode_n,
+ neon_cvt_mode_p,
+ neon_cvt_mode_m,
+ neon_cvt_mode_z,
+ neon_cvt_mode_x,
+ neon_cvt_mode_r
+};
+
/* Neon-syntax VFP conversions. */
static void
-do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
+do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
const char *opname = 0;
/* Conversions with immediate bitshift. */
const char *enc[] =
{
- "ftosls",
- "ftouls",
- "fsltos",
- "fultos",
- NULL,
- NULL,
- NULL,
- NULL,
- "ftosld",
- "ftould",
- "fsltod",
- "fultod",
- "fshtos",
- "fuhtos",
- "fshtod",
- "fuhtod",
- "ftoshs",
- "ftouhs",
- "ftoshd",
- "ftouhd"
+#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
+ CVT_FLAVOUR_VAR
+ NULL
+#undef CVT_VAR
};
- if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
+ if (flavour < (int) ARRAY_SIZE (enc))
{
opname = enc[flavour];
constraint (inst.operands[0].reg != inst.operands[1].reg,
/* Conversions without bitshift. */
const char *enc[] =
{
- "ftosis",
- "ftouis",
- "fsitos",
- "fuitos",
- "NULL",
- "NULL",
- "fcvtsd",
- "fcvtds",
- "ftosid",
- "ftouid",
- "fsitod",
- "fuitod"
+#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
+ CVT_FLAVOUR_VAR
+ NULL
+#undef CVT_VAR
};
- if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
+ if (flavour < (int) ARRAY_SIZE (enc))
opname = enc[flavour];
}
do_vfp_nsyn_cvtz (void)
{
enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
- int flavour = neon_cvt_flavour (rs);
+ enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
const char *enc[] =
{
- "ftosizs",
- "ftouizs",
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- "ftosizd",
- "ftouizd"
+#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
+ CVT_FLAVOUR_VAR
+ NULL
+#undef CVT_VAR
};
- if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
+ if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
do_vfp_nsyn_opcode (enc[flavour]);
}
/* Encode an FPv8 VCVT{A,N,P,M} float-to-integer conversion.
   FLAVOUR selects the operand types (only the four s32/u32 from
   f32/f64 forms are valid); MODE selects the rounding mode, encoded
   in the rm field (bits 16-17).  sz (bit 8) is 1 for a
   double-precision source, op (bit 7) is the signedness of the
   result: 1 for signed, 0 for unsigned.  */
static void
-do_neon_cvt_1 (bfd_boolean round_to_zero ATTRIBUTE_UNUSED)
+do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
+		      enum neon_cvt_mode mode)
+{
+  int sz, op;
+  int rm;
+
+  set_it_insn_type (OUTSIDE_IT_INSN);
+
+  switch (flavour)
+    {
+    case neon_cvt_flavour_s32_f64:
+      sz = 1;
+      op = 1;
+      break;
+    case neon_cvt_flavour_s32_f32:
+      sz = 0;
+      op = 1;
+      break;
+    case neon_cvt_flavour_u32_f64:
+      sz = 1;
+      op = 0;
+      break;
+    case neon_cvt_flavour_u32_f32:
+      sz = 0;
+      op = 0;
+      break;
+    default:
+      first_error (_("invalid instruction shape"));
+      return;
+    }
+
+  switch (mode)
+    {
+    case neon_cvt_mode_a: rm = 0; break;
+    case neon_cvt_mode_n: rm = 1; break;
+    case neon_cvt_mode_p: rm = 2; break;
+    case neon_cvt_mode_m: rm = 3; break;
+    default: first_error (_("invalid rounding mode")); return;
+    }
+
+  NEON_ENCODE (FPV8, inst);
+  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
+  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
+  inst.instruction |= sz << 8;
+  inst.instruction |= op << 7;
+  inst.instruction |= rm << 16;
+  /* These instructions are always unconditional.  */
+  inst.instruction |= 0xf0000000;
+  inst.is_neon = TRUE;
+}
+
+static void
+do_neon_cvt_1 (enum neon_cvt_mode mode)
{
enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
- int flavour = neon_cvt_flavour (rs);
+ enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
/* PR11109: Handle round-to-zero for VCVT conversions. */
- if (round_to_zero
+ if (mode == neon_cvt_mode_z
&& ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
- && (flavour == 0 || flavour == 1 || flavour == 8 || flavour == 9)
+ && (flavour == neon_cvt_flavour_s32_f32
+ || flavour == neon_cvt_flavour_u32_f32
+ || flavour == neon_cvt_flavour_s32_f64
+ || flavour == neon_cvt_flavour_u32_f64)
&& (rs == NS_FD || rs == NS_FF))
{
do_vfp_nsyn_cvtz ();
}
/* VFP rather than Neon conversions. */
- if (flavour >= 6)
+ if (flavour >= neon_cvt_flavour_first_fp)
{
- do_vfp_nsyn_cvt (rs, flavour);
+ if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
+ do_vfp_nsyn_cvt (rs, flavour);
+ else
+ do_vfp_nsyn_cvt_fpv8 (flavour, mode);
+
return;
}
goto int_encode;
immbits = 32 - inst.operands[2].imm;
NEON_ENCODE (IMMED, inst);
- if (flavour != -1)
+ if (flavour != neon_cvt_flavour_invalid)
inst.instruction |= enctab[flavour];
inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
inst.instruction |= HI1 (inst.operands[0].reg) << 22;
case NS_DD:
case NS_QQ:
+ if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
+ {
+ NEON_ENCODE (FLOAT, inst);
+ set_it_insn_type (OUTSIDE_IT_INSN);
+
+ if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
+ return;
+
+ inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
+ inst.instruction |= HI1 (inst.operands[0].reg) << 22;
+ inst.instruction |= LOW4 (inst.operands[1].reg);
+ inst.instruction |= HI1 (inst.operands[1].reg) << 5;
+ inst.instruction |= neon_quad (rs) << 6;
+ inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
+ inst.instruction |= mode << 8;
+ if (thumb_mode)
+ inst.instruction |= 0xfc000000;
+ else
+ inst.instruction |= 0xf0000000;
+ }
+ else
+ {
int_encode:
- {
- unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
+ {
+ unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
- NEON_ENCODE (INTEGER, inst);
+ NEON_ENCODE (INTEGER, inst);
- if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
- return;
+ if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
+ return;
- if (flavour != -1)
- inst.instruction |= enctab[flavour];
+ if (flavour != neon_cvt_flavour_invalid)
+ inst.instruction |= enctab[flavour];
- inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
- inst.instruction |= HI1 (inst.operands[0].reg) << 22;
- inst.instruction |= LOW4 (inst.operands[1].reg);
- inst.instruction |= HI1 (inst.operands[1].reg) << 5;
- inst.instruction |= neon_quad (rs) << 6;
- inst.instruction |= 2 << 18;
+ inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
+ inst.instruction |= HI1 (inst.operands[0].reg) << 22;
+ inst.instruction |= LOW4 (inst.operands[1].reg);
+ inst.instruction |= HI1 (inst.operands[1].reg) << 5;
+ inst.instruction |= neon_quad (rs) << 6;
+ inst.instruction |= 2 << 18;
- neon_dp_fixup (&inst);
- }
- break;
+ neon_dp_fixup (&inst);
+ }
+ }
+ break;
/* Half-precision conversions for Advanced SIMD -- neon. */
case NS_QD:
default:
/* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
- do_vfp_nsyn_cvt (rs, flavour);
+ if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
+ do_vfp_nsyn_cvt (rs, flavour);
+ else
+ do_vfp_nsyn_cvt_fpv8 (flavour, mode);
}
}
/* VCVTR: convert using the rounding mode from the FPSCR.  The old
   round_to_zero=FALSE boolean becomes neon_cvt_mode_x.  */
static void
do_neon_cvtr (void)
{
-  do_neon_cvt_1 (FALSE);
+  do_neon_cvt_1 (neon_cvt_mode_x);
}
/* VCVT: round towards zero.  The old round_to_zero=TRUE boolean
   becomes neon_cvt_mode_z.  */
static void
do_neon_cvt (void)
{
-  do_neon_cvt_1 (TRUE);
+  do_neon_cvt_1 (neon_cvt_mode_z);
}
/* ARMv8 VCVT{A,N,P,M}: conversions with an explicit rounding mode.
   The interleaved deletions below remove the old open-coded
   do_neon_cvtb body; VCVTB/VCVTT are re-implemented elsewhere via
   do_neon_cvttb_1.  */
static void
-do_neon_cvtb (void)
+do_neon_cvta (void)
{
-  inst.instruction = 0xeb20a40;
+  do_neon_cvt_1 (neon_cvt_mode_a);
+}
-  /* The sizes are attached to the mnemonic.  */
-  if (inst.vectype.el[0].type != NT_invtype
-      && inst.vectype.el[0].size == 16)
-    inst.instruction |= 0x00010000;
+static void
+do_neon_cvtn (void)
+{
+  do_neon_cvt_1 (neon_cvt_mode_n);
+}
-  /* Programmer's syntax: the sizes are attached to the operands.  */
-  else if (inst.operands[0].vectype.type != NT_invtype
-	   && inst.operands[0].vectype.size == 16)
-    inst.instruction |= 0x00010000;
+static void
+do_neon_cvtp (void)
+{
+  do_neon_cvt_1 (neon_cvt_mode_p);
+}
-  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
-  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
+static void
+do_neon_cvtm (void)
+{
+  do_neon_cvt_1 (neon_cvt_mode_m);
+}
+
+
/* Shared encoder for VCVTB/VCVTT.  T selects the top (bit 7) vs
   bottom half of the f16 register; TO selects the f16 -> wider
   direction (bit 16); IS_DOUBLE selects the ARMv8 f64 forms
   (bit 8) and picks D vs S registers on the wide side.  */
+static void
+do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
+{
+  if (is_double)
+    mark_feature_used (&fpu_vfp_ext_armv8);
+
+  encode_arm_vfp_reg (inst.operands[0].reg,
+		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
+  encode_arm_vfp_reg (inst.operands[1].reg,
+		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
+  inst.instruction |= to ? 0x10000 : 0;
+  inst.instruction |= t ? 0x80 : 0;
+  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
/* Dispatch VCVTB/VCVTT on operand types: try each of the four
   legal type pairs (f16<->f32, f16<->f64) in turn, clearing the
   error left by a failed neon_check_type before retrying.  On no
   match, the last neon_check_type's error is left set.  */
+static void
+do_neon_cvttb_1 (bfd_boolean t)
+{
+  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);
+
+  if (rs == NS_NULL)
+    return;
+  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
+    {
+      inst.error = NULL;
+      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
+    }
+  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
+    {
+      inst.error = NULL;
+      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
+    }
+  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
+    {
+      inst.error = NULL;
+      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
+    }
+  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
+    {
+      inst.error = NULL;
+      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
+    }
+  else
+    return;
+}
+
/* VCVTB: convert using the bottom half of the f16 register.  */
+static void
+do_neon_cvtb (void)
+{
+  do_neon_cvttb_1 (FALSE);
+}
+
/* VCVTT: convert using the top half of the f16 register.  Formerly
   encoded by patching bit 7 on top of do_neon_cvtb's encoding.  */
static void
do_neon_cvtt (void)
{
-  do_neon_cvtb ();
-  inst.instruction |= 0x80;
+  do_neon_cvttb_1 (TRUE);
}
static void
else
{
struct neon_type_el et = neon_check_type (3, NS_QDD,
- N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
+ N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
+
if (et.type == NT_poly)
NEON_ENCODE (POLY, inst);
else
NEON_ENCODE (INTEGER, inst);
- /* For polynomial encoding, size field must be 0b00 and the U bit must be
- zero. Should be OK as-is. */
+
+ /* For polynomial encoding the U bit must be zero, and the size must
+ be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
+ obviously, as 0b10). */
+ if (et.size == 64)
+ {
+ /* Check we're on the correct architecture. */
+ if (!mark_feature_used (&fpu_crypto_ext_armv8))
+ inst.error =
+ _("Instruction form not available on this architecture.");
+
+ et.size = 32;
+ }
+
neon_mixed_length (et, et.size);
}
}
unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
+ /* .<size> is optional here, defaulting to .32. */
+ if (inst.vectype.elems == 0
+ && inst.operands[0].vectype.type == NT_invtype
+ && inst.operands[1].vectype.type == NT_invtype)
+ {
+ inst.vectype.el[0].type = NT_untyped;
+ inst.vectype.el[0].size = 32;
+ inst.vectype.elems = 1;
+ }
+
et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
logsize = neon_logbits (et.size);
unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
unsigned abcdebits = 0;
+ /* .<dt> is optional here, defaulting to .32. */
+ if (inst.vectype.elems == 0
+ && inst.operands[0].vectype.type == NT_invtype
+ && inst.operands[1].vectype.type == NT_invtype)
+ {
+ inst.vectype.el[0].type = NT_untyped;
+ inst.vectype.el[0].size = 32;
+ inst.vectype.elems = 1;
+ }
+
et = neon_check_type (2, NS_NULL,
N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
logsize = neon_logbits (et.size);
do_vfp_nsyn_opcode ("fmsrr");
break;
+ case NS_NULL:
+ /* neon_select_shape has determined that the instruction
+ shape is wrong and has already set the error message. */
+ break;
+
default:
abort ();
}
And is UNPREDICTABLE in thumb mode. */
if (!is_ldr
&& inst.operands[1].reg == REG_PC
- && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
+ && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
{
- if (!thumb_mode && warn_on_deprecated)
- as_warn (_("Use of PC here is deprecated"));
- else
+ if (thumb_mode)
inst.error = _("Use of PC here is UNPREDICTABLE");
+ else if (warn_on_deprecated)
+ as_warn (_("Use of PC here is deprecated"));
}
if (inst.operands[0].issingle)
case NEON_ALL_LANES:
NEON_ENCODE (DUP, inst);
+ if (inst.instruction == N_INV)
+ {
+ first_error ("only loads support such operands");
+ break;
+ }
do_neon_ld_dup ();
break;
else
inst.instruction |= 0xf4000000;
}
+
+/* FP v8.  */
/* Common encoder for the three-operand FPv8 VFP forms (VSEL,
   VMAXNM/VMINNM): bit 8 distinguishes double from single precision,
   and the condition field is forced to 0xf (unconditional).  */
+static void
+do_vfp_nsyn_fpv8 (enum neon_shape rs)
+{
+  NEON_ENCODE (FPV8, inst);
+
+  if (rs == NS_FFF)
+    do_vfp_sp_dyadic ();
+  else
+    do_vfp_dp_rd_rn_rm ();
+
+  if (rs == NS_DDD)
+    inst.instruction |= 0x100;
+
+  inst.instruction |= 0xf0000000;
+}
+
/* VSEL: VFP-only conditional select; must not appear inside an IT
   block.  */
+static void
+do_vsel (void)
+{
+  set_it_insn_type (OUTSIDE_IT_INSN);
+
+  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
+    first_error (_("invalid instruction shape"));
+}
+
/* VMAXNM/VMINNM: try the VFP (scalar) encoding first, then fall
   back to the Neon F32 form, which requires the ARMv8 SIMD
   extension.  Outside an IT block in either case.  */
+static void
+do_vmaxnm (void)
+{
+  set_it_insn_type (OUTSIDE_IT_INSN);
+
+  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
+    return;
+
+  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
+    return;
+
+  neon_dyadic_misc (NT_untyped, N_F32, 0);
+}
+
/* Common worker for the VRINT family.  MODE selects the rounding
   behaviour.  Tries the VFP (scalar f32/f64) encoding first; on
   type mismatch falls back to the Neon f32 vector encoding, which
   needs the ARMv8 SIMD extension.  The a/n/p/m modes are
   unconditional-only; r/z/x keep the normal condition rules in the
   VFP form, and mode_r has no Neon equivalent.  */
+static void
+do_vrint_1 (enum neon_cvt_mode mode)
+{
+  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
+  struct neon_type_el et;
+
+  if (rs == NS_NULL)
+    return;
+
+  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
+  if (et.type != NT_invtype)
+    {
+      /* VFP encodings.  */
+      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
+	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
+	set_it_insn_type (OUTSIDE_IT_INSN);
+
+      NEON_ENCODE (FPV8, inst);
+      if (rs == NS_FF)
+	do_vfp_sp_monadic ();
+      else
+	do_vfp_dp_rd_rm ();
+
+      switch (mode)
+	{
+	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
+	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
+	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
+	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
+	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
+	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
+	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
+	default: abort ();
+	}
+
+      inst.instruction |= (rs == NS_DD) << 8;
+      do_vfp_cond_or_thumb ();
+    }
+  else
+    {
+      /* Neon encodings (or something broken...).  */
+      inst.error = NULL;
+      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);
+
+      if (et.type == NT_invtype)
+	return;
+
+      set_it_insn_type (OUTSIDE_IT_INSN);
+      NEON_ENCODE (FLOAT, inst);
+
+      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
+	return;
+
+      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
+      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
+      inst.instruction |= LOW4 (inst.operands[1].reg);
+      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
+      inst.instruction |= neon_quad (rs) << 6;
+      switch (mode)
+	{
+	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
+	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
+	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
+	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
+	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
+	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
+	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
+	default: abort ();
+	}
+
+      if (thumb_mode)
+	inst.instruction |= 0xfc000000;
+      else
+	inst.instruction |= 0xf0000000;
+    }
+}
+
/* One-line wrappers mapping each VRINT mnemonic to its rounding
   mode: X = exact (raise inexact), Z = towards zero, R = FPSCR
   mode, A = away from zero, N = to nearest even, P = towards +inf,
   M = towards -inf.  */
+static void
+do_vrintx (void)
+{
+  do_vrint_1 (neon_cvt_mode_x);
+}
+
+static void
+do_vrintz (void)
+{
+  do_vrint_1 (neon_cvt_mode_z);
+}
+
+static void
+do_vrintr (void)
+{
+  do_vrint_1 (neon_cvt_mode_r);
+}
+
+static void
+do_vrinta (void)
+{
+  do_vrint_1 (neon_cvt_mode_a);
+}
+
+static void
+do_vrintn (void)
+{
+  do_vrint_1 (neon_cvt_mode_n);
+}
+
+static void
+do_vrintp (void)
+{
+  do_vrint_1 (neon_cvt_mode_p);
+}
+
+static void
+do_vrintm (void)
+{
+  do_vrint_1 (neon_cvt_mode_m);
+}
+
+/* Crypto v1 instructions.  */
/* Common encoder for the two-operand (Q, Q) crypto forms (AES*,
   SHA1H, SHA1SU1, SHA256SU0).  ELTTYPE is the required element
   type; OP goes into bits 6+ unless -1 (no op field).  Types must
   be explicitly untyped (N_UNT); IT blocks are not allowed.  */
+static void
+do_crypto_2op_1 (unsigned elttype, int op)
+{
+  set_it_insn_type (OUTSIDE_IT_INSN);
+
+  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
+      == NT_invtype)
+    return;
+
+  inst.error = NULL;
+
+  NEON_ENCODE (INTEGER, inst);
+  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
+  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
+  inst.instruction |= LOW4 (inst.operands[1].reg);
+  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
+  if (op != -1)
+    inst.instruction |= op << 6;
+
+  if (thumb_mode)
+    inst.instruction |= 0xfc000000;
+  else
+    inst.instruction |= 0xf0000000;
+}
+
/* Common encoder for the three-operand (Q, Q, Q) crypto forms
   (SHA1C/P/M/SU0, SHA256H/H2/SU1).  U and OP select the variant;
   operand types must be 32-bit and explicitly untyped.  */
+static void
+do_crypto_3op_1 (int u, int op)
+{
+  set_it_insn_type (OUTSIDE_IT_INSN);
+
+  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
+		       N_32 | N_UNT | N_KEY).type == NT_invtype)
+    return;
+
+  inst.error = NULL;
+
+  NEON_ENCODE (INTEGER, inst);
+  neon_three_same (1, u, 8 << op);
+}
+
/* Per-mnemonic wrappers for the crypto instructions; each just
   passes the variant selectors to do_crypto_2op_1 or
   do_crypto_3op_1.  */
+static void
+do_aese (void)
+{
+  do_crypto_2op_1 (N_8, 0);
+}
+
+static void
+do_aesd (void)
+{
+  do_crypto_2op_1 (N_8, 1);
+}
+
+static void
+do_aesmc (void)
+{
+  do_crypto_2op_1 (N_8, 2);
+}
+
+static void
+do_aesimc (void)
+{
+  do_crypto_2op_1 (N_8, 3);
+}
+
+static void
+do_sha1c (void)
+{
+  do_crypto_3op_1 (0, 0);
+}
+
+static void
+do_sha1p (void)
+{
+  do_crypto_3op_1 (0, 1);
+}
+
+static void
+do_sha1m (void)
+{
+  do_crypto_3op_1 (0, 2);
+}
+
+static void
+do_sha1su0 (void)
+{
+  do_crypto_3op_1 (0, 3);
+}
+
+static void
+do_sha256h (void)
+{
+  do_crypto_3op_1 (1, 0);
+}
+
+static void
+do_sha256h2 (void)
+{
+  do_crypto_3op_1 (1, 1);
+}
+
+static void
+do_sha256su1 (void)
+{
+  do_crypto_3op_1 (1, 2);
+}
+
+static void
+do_sha1h (void)
+{
+  do_crypto_2op_1 (N_32, -1);
+}
+
+static void
+do_sha1su1 (void)
+{
+  do_crypto_2op_1 (N_32, 0);
+}
+
+static void
+do_sha256su0 (void)
+{
+  do_crypto_2op_1 (N_32, 1);
+}
+
/* Common encoder for CRC32/CRC32C.  POLY selects the Castagnoli
   polynomial (the C variants), SZ the data size (0=B, 1=H, 2=W).
   Field positions differ between ARM and Thumb encodings, hence
   the thumb_mode shifts.  r15 (and r13 in Thumb) are
   UNPREDICTABLE, so warn rather than error.  */
+static void
+do_crc32_1 (unsigned int poly, unsigned int sz)
+{
+  unsigned int Rd = inst.operands[0].reg;
+  unsigned int Rn = inst.operands[1].reg;
+  unsigned int Rm = inst.operands[2].reg;
+
+  set_it_insn_type (OUTSIDE_IT_INSN);
+  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
+  inst.instruction |= LOW4 (Rn) << 16;
+  inst.instruction |= LOW4 (Rm);
+  inst.instruction |= sz << (thumb_mode ? 4 : 21);
+  inst.instruction |= poly << (thumb_mode ? 20 : 9);
+
+  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
+    as_warn (UNPRED_REG ("r15"));
+  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
+    as_warn (UNPRED_REG ("r13"));
+}
+
/* Per-mnemonic CRC32 wrappers: (poly, size) with poly 0 = CRC32,
   1 = CRC32C, and size 0 = byte, 1 = halfword, 2 = word.  */
+static void
+do_crc32b (void)
+{
+  do_crc32_1 (0, 0);
+}
+
+static void
+do_crc32h (void)
+{
+  do_crc32_1 (0, 1);
+}
+
+static void
+do_crc32w (void)
+{
+  do_crc32_1 (0, 2);
+}
+
+static void
+do_crc32cb (void)
+{
+  do_crc32_1 (1, 0);
+}
+
+static void
+do_crc32ch (void)
+{
+  do_crc32_1 (1, 1);
+}
+
+static void
+do_crc32cw (void)
+{
+  do_crc32_1 (1, 2);
+}
+
\f
/* Overall per-instruction processing. */
{
if (inst.instruction >= 0x10000)
{
- as_warn (_("it blocks containing wide Thumb instructions are "
+ as_warn (_("IT blocks containing 32-bit Thumb instructions are "
"deprecated in ARMv8"));
now_it.warn_deprecated = TRUE;
}
{
if ((inst.instruction & p->mask) == p->pattern)
{
- as_warn (_("it blocks containing 16-bit Thumb intsructions "
+ as_warn (_("IT blocks containing 16-bit Thumb instructions "
"of the following class are deprecated in ARMv8: "
"%s"), p->description);
now_it.warn_deprecated = TRUE;
if (now_it.block_length > 1)
{
- as_warn (_("it blocks of more than one conditional instruction are "
- "deprecated in ARMv8"));
+ as_warn (_("IT blocks containing more than one conditional "
+ "instruction are deprecated in ARMv8"));
now_it.warn_deprecated = TRUE;
}
}
REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
- REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(SP_fiq,512|(13<<16),RNB),
+ REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
-/* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
- appear in the condition table. */
-#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
- { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
- 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
-
-#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
- TxCM_ (m1, , m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, le, m2, op, top, nops, ops, ae, te), \
- TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)
-
-#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
- TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
-#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
- TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)
-
/* Mnemonic that cannot be conditionalized. The ARM condition-code
field is still 0xE. Many of the Thumb variants can be executed
conditionally, so this is checked separately. */
{ mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
THUMB_VARIANT, do_##ae, do_##te }
+/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
+ Used by mnemonics that have very minimal differences in the encoding for
+ ARM and Thumb variants and can be handled in a common function. */
+#define TUEc(mnem, op, top, nops, ops, en) \
+ { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
+ THUMB_VARIANT, do_##en, do_##en }
+
/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
condition code field. */
#define TUF(mnem, op, top, nops, ops, ae, te) \
tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
- tCM("ld","sh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
- tCM("ld","sb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
+ tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
+ tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v4t_5
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6_notm
TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
+ TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
UF(rfeib, 9900a00, 1, (RRw), rfe),
UF(rfeda, 8100a00, 1, (RRw), rfe),
TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
- UF(rfefa, 9900a00, 1, (RRw), rfe),
- UF(rfeea, 8100a00, 1, (RRw), rfe),
- TUF("rfeed", 9100a00, e810c000, 1, (RRw), rfe, rfe),
+ UF(rfefa, 8100a00, 1, (RRw), rfe),
+ TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
+ UF(rfeed, 9900a00, 1, (RRw), rfe),
TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
+ TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
+ TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
+ UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
UF(srsda, 8400500, 2, (oRRw, I31w), srs),
+ UF(srsed, 8400500, 2, (oRRw, I31w), srs),
TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
+ TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
/* ARM V6 not included in V7M (eg. integer SIMD). */
#undef THUMB_VARIANT
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_barrier
- TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, t_barrier),
- TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, t_barrier),
- TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, t_barrier),
+ TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
+ TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
+ TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
/* ARM V7 instructions. */
#undef ARM_VARIANT
tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
+ TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
+ TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
+ ldrexd, t_ldrexd),
+ TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
+ TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
+ TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
+ stlex, t_stlex),
+ TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
+ strexd, t_strexd),
+ TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
+ stlex, t_stlex),
+ TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
+ stlex, t_stlex),
+ TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
+ TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
+ TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
+ TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
+ TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
+ TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
/* ARMv8 T32 only. */
#undef ARM_VARIANT
TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
+ /* FP for ARMv8. */
+#undef ARM_VARIANT
+#define ARM_VARIANT & fpu_vfp_ext_armv8
+#undef THUMB_VARIANT
+#define THUMB_VARIANT & fpu_vfp_ext_armv8
+
+ nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
+ nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
+ nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
+ nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
+ nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
+ nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
+ nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
+ nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
+ nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
+ nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
+ nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
+ nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
+ nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
+ nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
+ nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
+ nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
+ nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
+
+ /* Crypto v1 extensions. */
+#undef ARM_VARIANT
+#define ARM_VARIANT & fpu_crypto_ext_armv8
+#undef THUMB_VARIANT
+#define THUMB_VARIANT & fpu_crypto_ext_armv8
+
+ nUF(aese, _aes, 2, (RNQ, RNQ), aese),
+ nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
+ nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
+ nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
+ nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
+ nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
+ nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
+ nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
+ nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
+ nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
+ nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
+ nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
+ nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
+ nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
+
+#undef ARM_VARIANT
+#define ARM_VARIANT & crc_ext_armv8
+#undef THUMB_VARIANT
+#define THUMB_VARIANT & crc_ext_armv8
+ TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
+ TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
+ TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
+ TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
+ TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
+ TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
+
#undef ARM_VARIANT
#define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
#undef THUMB_VARIANT
nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
- nCEF(vcvtb, _vcvt, 2, (RVS, RVS), neon_cvtb),
- nCEF(vcvtt, _vcvt, 2, (RVS, RVS), neon_cvtt),
+ NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
+ NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
/* NOTE: All VMOV encoding is special-cased! */
#undef ARM_VARIANT
#undef THUMB_VARIANT
#undef TCE
-#undef TCM
#undef TUE
#undef TUF
#undef TCC
as_bad_where (fixP->fx_file, fixP->fx_line,
_("invalid literal constant: pool needs to be closer"));
else
- as_bad (_("bad immediate value for 8-bit offset (%ld)"),
- (long) value);
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ _("bad immediate value for 8-bit offset (%ld)"),
+ (long) value);
break;
}
thumb_bl_common:
-#ifdef OBJ_ELF
- if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
- && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
- fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
-#endif
-
if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
/* For a BLX instruction, make sure that the relocation is rounded up
to a word boundary. This follows the semantics of the instruction
which specifies that bit 1 of the target address will come from bit
1 of the base address. */
- value = (value + 1) & ~ 1;
+ value = (value + 3) & ~ 3;
+
+#ifdef OBJ_ELF
+ if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
+ && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
+ fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
+#endif
if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
{
ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
FPU_ARCH_NEON_VFP_V4,
"Cortex-A15"),
+ ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
+ "Cortex-A53"),
+ ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
+ "Cortex-A57"),
ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R, FPU_NONE, "Cortex-R4"),
ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16,
"Cortex-R4F"),
ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV,
FPU_NONE, "Cortex-R5"),
+ ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV,
+ FPU_ARCH_VFP_V3D16,
+ "Cortex-R7"),
ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M4"),
ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M, FPU_NONE, "Cortex-M3"),
ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M1"),
ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
/* Maverick */
ARM_CPU_OPT ("ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
- FPU_ARCH_MAVERICK,
- "ARM920T"),
+ FPU_ARCH_MAVERICK, "ARM920T"),
+ /* Marvell processors. */
+ ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE (ARM_AEXT_V7A | ARM_EXT_MP | ARM_EXT_SEC, 0),
+ FPU_ARCH_VFP_V3D16, NULL),
+
{ NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
#define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
static const struct arm_option_extension_value_table arm_extensions[] =
{
+ ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE (ARM_EXT_V8, 0)),
ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
ARM_FEATURE (ARM_EXT_V8, 0)),
ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8,