(neon_cvt_mode): Add neon_cvt_mode_r.
(do_vrint_1): New function.
(do_vrint_x): Likewise.
(do_vrint_z): Likewise.
(do_vrint_r): Likewise.
(do_vrint_a): Likewise.
(do_vrint_n): Likewise.
(do_vrint_p): Likewise.
(do_vrint_m): Likewise.
(insns): Add VRINT instructions.
* gas/testsuite/gas/arm/armv8-a+fpv5.d: Update testcase.
* gas/testsuite/gas/arm/armv8-a+fpv5.s: Likewise.
* gas/testsuite/gas/arm/armv8-a+simdv3.d: Likewise.
* gas/testsuite/gas/arm/armv8-a+simdv3.s: Likewise.
* opcodes/arm-dis.c (coprocessor_opcodes): Add VRINT.
(neon_opcodes): Likewise.
2012-08-24 Matthew Gretton-Dann <matthew.gretton-dann@arm.com>
+ * config/tc-arm.c (NEON_ENC_TAB): Add vrint entries.
+ (neon_cvt_mode): Add neon_cvt_mode_r.
+ (do_vrint_1): New function.
+ (do_vrint_x): Likewise.
+ (do_vrint_z): Likewise.
+ (do_vrint_r): Likewise.
+ (do_vrint_a): Likewise.
+ (do_vrint_n): Likewise.
+ (do_vrint_p): Likewise.
+ (do_vrint_m): Likewise.
+ (insns): Add VRINT instructions.
+
+2012-08-24 Matthew Gretton-Dann <matthew.gretton-dann@arm.com>
+
* config/tc-arm.c (NEON_ENC_TAB): Add vcvta entry.
(neon_cvt_mode): New enumeration.
(do_vfp_nsyn_cvt_fpv8): New function.
X(vselgt, 0xe300a00, N_INV, N_INV), \
X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
- X(vcvta, 0xebc0a40, 0x3bb0000, N_INV)
+ X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
+ X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
+ X(vrinta, 0xeb80a40, 0x3ba0400, N_INV)
enum neon_opc
{
neon_cvt_mode_p,
neon_cvt_mode_m,
neon_cvt_mode_z,
- neon_cvt_mode_x
+ neon_cvt_mode_x,
+ neon_cvt_mode_r
};
/* Neon-syntax VFP conversions. */
neon_dyadic_misc (NT_untyped, N_F32, 0);
}
+/* Common worker for the VRINT{X,Z,R,A,N,P,M} mnemonics: encode a
+   floating-point round-to-integral instruction with rounding MODE.
+   Tries the VFP (scalar F32/F64) encoding first and falls back to the
+   Neon (vector F32) encoding; mode r has no Neon form.  */
+static void
+do_vrint_1 (enum neon_cvt_mode mode)
+{
+ enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
+ struct neon_type_el et;
+
+ if (rs == NS_NULL)
+ return;
+
+ et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
+ if (et.type != NT_invtype)
+ {
+ /* VFP encodings. */
+ /* The a/n/p/m variants use the unconditional encoding space (see the
+    0xf0000000 values below), so they may not appear inside an IT block. */
+ if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
+ || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
+ set_it_insn_type (OUTSIDE_IT_INSN);
+
+ NEON_ENCODE (FPV8, inst);
+ if (rs == NS_FF)
+ do_vfp_sp_monadic ();
+ else
+ do_vfp_dp_rd_rm ();
+
+ /* Fold in the rounding-mode specific opcode bits. */
+ switch (mode)
+ {
+ case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
+ case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
+ case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
+ case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
+ case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
+ case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
+ case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
+ default: abort ();
+ }
+
+ /* Bit 8 is set for the double-precision (NS_DD) form. */
+ inst.instruction |= (rs == NS_DD) << 8;
+ do_vfp_cond_or_thumb ();
+ }
+ else
+ {
+ /* Neon encodings (or something broken...). */
+ /* Discard the error from the failed VFP type check and retry as a
+    Neon vector operation. */
+ inst.error = NULL;
+ et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);
+
+ if (et.type == NT_invtype)
+ return;
+
+ set_it_insn_type (OUTSIDE_IT_INSN);
+ NEON_ENCODE (FLOAT, inst);
+
+ if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
+ return;
+
+ inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
+ inst.instruction |= HI1 (inst.operands[0].reg) << 22;
+ inst.instruction |= LOW4 (inst.operands[1].reg);
+ inst.instruction |= HI1 (inst.operands[1].reg) << 5;
+ inst.instruction |= neon_quad (rs) << 6;
+ /* Rounding mode is encoded in bits 7-9; VRINTR exists only as a VFP
+    instruction, so reject it here. */
+ switch (mode)
+ {
+ case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
+ case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
+ case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
+ case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
+ case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
+ case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
+ case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
+ default: abort ();
+ }
+
+ /* Leading bits of the unconditional encoding: Thumb32 vs ARM. */
+ if (thumb_mode)
+ inst.instruction |= 0xfc000000;
+ else
+ inst.instruction |= 0xf0000000;
+ }
+}
+
+/* Per-mnemonic entry points for VRINTX/VRINTZ/VRINTR/VRINTA/VRINTN/
+   VRINTP/VRINTM: each simply dispatches to do_vrint_1 with the
+   corresponding rounding mode.  */
+static void
+do_vrintx (void)
+{
+ do_vrint_1 (neon_cvt_mode_x);
+}
+
+static void
+do_vrintz (void)
+{
+ do_vrint_1 (neon_cvt_mode_z);
+}
+
+static void
+do_vrintr (void)
+{
+ do_vrint_1 (neon_cvt_mode_r);
+}
+
+static void
+do_vrinta (void)
+{
+ do_vrint_1 (neon_cvt_mode_a);
+}
+
+static void
+do_vrintn (void)
+{
+ do_vrint_1 (neon_cvt_mode_n);
+}
+
+static void
+do_vrintp (void)
+{
+ do_vrint_1 (neon_cvt_mode_p);
+}
+
+static void
+do_vrintm (void)
+{
+ do_vrint_1 (neon_cvt_mode_m);
+}
+
\f
/* Overall per-instruction processing. */
nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
+ nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
+ nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
+ nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
+ nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
+ nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
+ nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
+ nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
#undef ARM_VARIANT
#define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
2012-08-24 Matthew Gretton-Dann <matthew.gretton-dann@arm.com>
+ * gas/arm/armv8-a+fpv5.d: Update testcase.
+ * gas/arm/armv8-a+fpv5.s: Likewise.
+ * gas/arm/armv8-a+simdv3.d: Likewise.
+ * gas/arm/armv8-a+simdv3.s: Likewise.
+
+2012-08-24 Matthew Gretton-Dann <matthew.gretton-dann@arm.com>
+
* gas/arm/armv8-a+fp.d: Update testcase.
* gas/arm/armv8-a+fp.s: Likewise.
* gas/arm/armv8-a+simd.d: Likewise.
0[0-9a-f]+ <[^>]+> fefd0b60 vcvtn.u32.f64 s1, d16
0[0-9a-f]+ <[^>]+> febefb4f vcvtp.u32.f64 s30, d15
0[0-9a-f]+ <[^>]+> fefffb6f vcvtm.u32.f64 s31, d31
+0[0-9a-f]+ <[^>]+> eeb60ac0 vrintz.f32.f32 s0, s0
+0[0-9a-f]+ <[^>]+> eef70a60 vrintx.f32.f32 s1, s1
+0[0-9a-f]+ <[^>]+> 0eb6fa4f vrintreq.f32.f32 s30, s30
+0[0-9a-f]+ <[^>]+> feb80a40 vrinta.f32.f32 s0, s0
+0[0-9a-f]+ <[^>]+> fef90a60 vrintn.f32.f32 s1, s1
+0[0-9a-f]+ <[^>]+> febafa4f vrintp.f32.f32 s30, s30
+0[0-9a-f]+ <[^>]+> fefbfa6f vrintm.f32.f32 s31, s31
+0[0-9a-f]+ <[^>]+> eeb60bc0 vrintz.f64.f64 d0, d0
+0[0-9a-f]+ <[^>]+> eeb71b41 vrintx.f64.f64 d1, d1
+0[0-9a-f]+ <[^>]+> 0ef6eb6e vrintreq.f64.f64 d30, d30
+0[0-9a-f]+ <[^>]+> feb80b40 vrinta.f64.f64 d0, d0
+0[0-9a-f]+ <[^>]+> feb91b41 vrintn.f64.f64 d1, d1
+0[0-9a-f]+ <[^>]+> fefaeb6e vrintp.f64.f64 d30, d30
+0[0-9a-f]+ <[^>]+> fefbfb6f vrintm.f64.f64 d31, d31
0[0-9a-f]+ <[^>]+> fe00 0a00 vseleq.f32 s0, s0, s0
0[0-9a-f]+ <[^>]+> fe50 0aa0 vselvs.f32 s1, s1, s1
0[0-9a-f]+ <[^>]+> fe2f fa0f vselge.f32 s30, s30, s30
0[0-9a-f]+ <[^>]+> fefd 0b60 vcvtn.u32.f64 s1, d16
0[0-9a-f]+ <[^>]+> febe fb4f vcvtp.u32.f64 s30, d15
0[0-9a-f]+ <[^>]+> feff fb6f vcvtm.u32.f64 s31, d31
+0[0-9a-f]+ <[^>]+> eeb6 0ac0 vrintz.f32.f32 s0, s0
+0[0-9a-f]+ <[^>]+> eef7 0a60 vrintx.f32.f32 s1, s1
+0[0-9a-f]+ <[^>]+> eeb6 fa4f vrintr.f32.f32 s30, s30
+0[0-9a-f]+ <[^>]+> feb8 0a40 vrinta.f32.f32 s0, s0
+0[0-9a-f]+ <[^>]+> fef9 0a60 vrintn.f32.f32 s1, s1
+0[0-9a-f]+ <[^>]+> feba fa4f vrintp.f32.f32 s30, s30
+0[0-9a-f]+ <[^>]+> fefb fa6f vrintm.f32.f32 s31, s31
+0[0-9a-f]+ <[^>]+> eeb6 0bc0 vrintz.f64.f64 d0, d0
+0[0-9a-f]+ <[^>]+> eeb7 1b41 vrintx.f64.f64 d1, d1
+0[0-9a-f]+ <[^>]+> eef6 eb6e vrintr.f64.f64 d30, d30
+0[0-9a-f]+ <[^>]+> feb8 0b40 vrinta.f64.f64 d0, d0
+0[0-9a-f]+ <[^>]+> feb9 1b41 vrintn.f64.f64 d1, d1
+0[0-9a-f]+ <[^>]+> fefa eb6e vrintp.f64.f64 d30, d30
+0[0-9a-f]+ <[^>]+> fefb fb6f vrintm.f64.f64 d31, d31
vcvtn.s32.f64 s1, d16
vcvtp.u32.f64 s30, d15
vcvtm.u32.f64 s31, d31
+ vrintz.f32.f32 s0, s0
+ vrintx.f32.f32 s1, s1
+ vrintreq.f32.f32 s30, s30
+ vrinta.f32.f32 s0, s0
+ vrintn.f32.f32 s1, s1
+ vrintp.f32.f32 s30, s30
+ vrintm.f32.f32 s31, s31
+ vrintz.f64.f64 d0, d0
+ vrintx.f64.f64 d1, d1
+ vrintreq.f64.f64 d30, d30
+ vrinta.f64.f64 d0, d0
+ vrintn.f64.f64 d1, d1
+ vrintp.f64.f64 d30, d30
+ vrintm.f64.f64 d31, d31
.thumb
vseleq.f32 s0, s0, s0
vcvtn.s32.f64 s1, d16
vcvtp.u32.f64 s30, d15
vcvtm.u32.f64 s31, d31
+ vrintz.f32.f32 s0, s0
+ vrintx.f32.f32 s1, s1
+ vrintr.f32.f32 s30, s30
+ vrinta.f32.f32 s0, s0
+ vrintn.f32.f32 s1, s1
+ vrintp.f32.f32 s30, s30
+ vrintm.f32.f32 s31, s31
+ vrintz.f64.f64 d0, d0
+ vrintx.f64.f64 d1, d1
+ vrintr.f64.f64 d30, d30
+ vrinta.f64.f64 d0, d0
+ vrintn.f64.f64 d1, d1
+ vrintp.f64.f64 d30, d30
+ vrintm.f64.f64 d31, d31
0[0-9a-f]+ <[^>]+> f3fb0160 vcvtn.s32.f32 q8, q8
0[0-9a-f]+ <[^>]+> f3bbe2ce vcvtp.u32.f32 q7, q7
0[0-9a-f]+ <[^>]+> f3fbe3ee vcvtm.u32.f32 q15, q15
+0[0-9a-f]+ <[^>]+> f3ba0500 vrinta.f32.f32 d0, d0
+0[0-9a-f]+ <[^>]+> f3fa0420 vrintn.f32.f32 d16, d16
+0[0-9a-f]+ <[^>]+> f3baf68f vrintm.f32.f32 d15, d15
+0[0-9a-f]+ <[^>]+> f3faf7af vrintp.f32.f32 d31, d31
+0[0-9a-f]+ <[^>]+> f3ba04af vrintx.f32.f32 d0, d31
+0[0-9a-f]+ <[^>]+> f3fa058f vrintz.f32.f32 d16, d15
+0[0-9a-f]+ <[^>]+> f3ba0540 vrinta.f32.f32 q0, q0
+0[0-9a-f]+ <[^>]+> f3fa0460 vrintn.f32.f32 q8, q8
+0[0-9a-f]+ <[^>]+> f3bae6ce vrintm.f32.f32 q7, q7
+0[0-9a-f]+ <[^>]+> f3fae7ee vrintp.f32.f32 q15, q15
+0[0-9a-f]+ <[^>]+> f3ba04ee vrintx.f32.f32 q0, q15
+0[0-9a-f]+ <[^>]+> f3fa05ce vrintz.f32.f32 q8, q7
0[0-9a-f]+ <[^>]+> ff00 0f10 vmaxnm.f32 d0, d0, d0
0[0-9a-f]+ <[^>]+> ff40 0fb0 vmaxnm.f32 d16, d16, d16
0[0-9a-f]+ <[^>]+> ff0f ff1f vmaxnm.f32 d15, d15, d15
0[0-9a-f]+ <[^>]+> fffb 0160 vcvtn.s32.f32 q8, q8
0[0-9a-f]+ <[^>]+> ffbb e2ce vcvtp.u32.f32 q7, q7
0[0-9a-f]+ <[^>]+> fffb e3ee vcvtm.u32.f32 q15, q15
+0[0-9a-f]+ <[^>]+> ffba 0500 vrinta.f32.f32 d0, d0
+0[0-9a-f]+ <[^>]+> fffa 0420 vrintn.f32.f32 d16, d16
+0[0-9a-f]+ <[^>]+> ffba f68f vrintm.f32.f32 d15, d15
+0[0-9a-f]+ <[^>]+> fffa f7af vrintp.f32.f32 d31, d31
+0[0-9a-f]+ <[^>]+> ffba 04af vrintx.f32.f32 d0, d31
+0[0-9a-f]+ <[^>]+> fffa 058f vrintz.f32.f32 d16, d15
+0[0-9a-f]+ <[^>]+> ffba 0540 vrinta.f32.f32 q0, q0
+0[0-9a-f]+ <[^>]+> fffa 0460 vrintn.f32.f32 q8, q8
+0[0-9a-f]+ <[^>]+> ffba e6ce vrintm.f32.f32 q7, q7
+0[0-9a-f]+ <[^>]+> fffa e7ee vrintp.f32.f32 q15, q15
+0[0-9a-f]+ <[^>]+> ffba 04ee vrintx.f32.f32 q0, q15
+0[0-9a-f]+ <[^>]+> fffa 05ce vrintz.f32.f32 q8, q7
vcvtn.s32.f32 q8, q8
vcvtp.u32.f32 q7, q7
vcvtm.u32.f32 q15, q15
+ vrinta.f32.f32 d0, d0
+ vrintn.f32.f32 d16, d16
+ vrintm.f32.f32 d15, d15
+ vrintp.f32.f32 d31, d31
+ vrintx.f32.f32 d0, d31
+ vrintz.f32.f32 d16, d15
+ vrinta.f32.f32 q0, q0
+ vrintn.f32.f32 q8, q8
+ vrintm.f32.f32 q7, q7
+ vrintp.f32.f32 q15, q15
+ vrintx.f32.f32 q0, q15
+ vrintz.f32.f32 q8, q7
.thumb
vmaxnm.f32 d0, d0, d0
vcvtn.s32.f32 q8, q8
vcvtp.u32.f32 q7, q7
vcvtm.u32.f32 q15, q15
+ vrinta.f32.f32 d0, d0
+ vrintn.f32.f32 d16, d16
+ vrintm.f32.f32 d15, d15
+ vrintp.f32.f32 d31, d31
+ vrintx.f32.f32 d0, d31
+ vrintz.f32.f32 d16, d15
+ vrinta.f32.f32 q0, q0
+ vrintn.f32.f32 q8, q8
+ vrintm.f32.f32 q7, q7
+ vrintp.f32.f32 q15, q15
+ vrintx.f32.f32 q0, q15
+ vrintz.f32.f32 q8, q7
+2012-08-24 Matthew Gretton-Dann <matthew.gretton-dann@arm.com>
+
+ * arm-dis.c (coprocessor_opcodes): Add VRINT.
+ (neon_opcodes): Likewise.
+
2012-08-24 Matthew Gretton-Dann <matthew.gretton-dann@arm.com>
* arm-dis.c (coprocessor_opcodes): Add support for new VCVT
{FPU_VFP_EXT_ARMV8, 0xfe800b40, 0xffb00f40, "vminnm%u.f64\t%z1, %z2, %z0"},
{FPU_VFP_EXT_ARMV8, 0xfebc0a40, 0xffbc0f50, "vcvt%16-17?mpna%u.%7?su32.f32\t%y1, %y0"},
{FPU_VFP_EXT_ARMV8, 0xfebc0b40, 0xffbc0f50, "vcvt%16-17?mpna%u.%7?su32.f64\t%y1, %z0"},
+ {FPU_VFP_EXT_ARMV8, 0x0eb60a40, 0x0fbe0f50, "vrint%7,16??xzr%c.f32.f32\t%y1, %y0"},
+ {FPU_VFP_EXT_ARMV8, 0x0eb60b40, 0x0fbe0f50, "vrint%7,16??xzr%c.f64.f64\t%z1, %z0"},
+ {FPU_VFP_EXT_ARMV8, 0xfeb80a40, 0xffbc0f50, "vrint%16-17?mpna%u.f32.f32\t%y1, %y0"},
+ {FPU_VFP_EXT_ARMV8, 0xfeb80b40, 0xffbc0f50, "vrint%16-17?mpna%u.f64.f64\t%z1, %z0"},
/* Generic coprocessor instructions. */
{ 0, SENTINEL_GENERIC_START, 0, "" },
{FPU_NEON_EXT_FMA, 0xf2200c10, 0xffa00f10, "vfms%c.f%20U0\t%12-15,22R, %16-19,7R, %0-3,5R"},
/* Two registers, miscellaneous. */
+ {FPU_NEON_EXT_ARMV8, 0xf3ba0400, 0xffbf0c10, "vrint%7-9?p?m?zaxn%u.f32.f32\t%12-15,22R, %0-3,5R"},
{FPU_NEON_EXT_ARMV8, 0xf3bb0000, 0xffbf0c10, "vcvt%8-9?mpna%u.%7?us32.f32\t%12-15,22R, %0-3,5R"},
{FPU_NEON_EXT_V1, 0xf2880a10, 0xfebf0fd0, "vmovl%c.%24?us8\t%12-15,22Q, %0-3,5D"},
{FPU_NEON_EXT_V1, 0xf2900a10, 0xfebf0fd0, "vmovl%c.%24?us16\t%12-15,22Q, %0-3,5D"},