2014-07-17 Yvan Roux <yvan.roux@linaro.org>
+ Backport from trunk r211174.
+ 2014-06-03 Alan Lawrence <alan.lawrence@arm.com>
+
+ * config/aarch64/aarch64-simd.md (aarch64_rev<REVERSE:rev_op><mode>):
+ New pattern.
+ * config/aarch64/aarch64.c (aarch64_evpc_rev): New function.
+ (aarch64_expand_vec_perm_const_1): Add call to aarch64_evpc_rev.
+ * config/aarch64/iterators.md (REVERSE): New iterator.
+ (UNSPEC_REV64, UNSPEC_REV32, UNSPEC_REV16): New enum elements.
+ (rev_op): New int_attribute.
+ * config/aarch64/arm_neon.h (vrev16_p8, vrev16_s8, vrev16_u8,
+ vrev16q_p8, vrev16q_s8, vrev16q_u8, vrev32_p8, vrev32_p16, vrev32_s8,
+ vrev32_s16, vrev32_u8, vrev32_u16, vrev32q_p8, vrev32q_p16, vrev32q_s8,
+ vrev32q_s16, vrev32q_u8, vrev32q_u16, vrev64_f32, vrev64_p8,
+ vrev64_p16, vrev64_s8, vrev64_s16, vrev64_s32, vrev64_u8, vrev64_u16,
+ vrev64_u32, vrev64q_f32, vrev64q_p8, vrev64q_p16, vrev64q_s8,
+ vrev64q_s16, vrev64q_s32, vrev64q_u8, vrev64q_u16, vrev64q_u32):
+ Replace temporary __asm__ with __builtin_shuffle.
+
+2014-07-17 Yvan Roux <yvan.roux@linaro.org>
+
Backport from trunk r210216, r210218, r210219.
2014-05-08 Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
}
)
+(define_insn "aarch64_rev<REVERSE:rev_op><mode>"
+ [(set (match_operand:VALL 0 "register_operand" "=w")
+ (unspec:VALL [(match_operand:VALL 1 "register_operand" "w")]
+ REVERSE))]
+ "TARGET_SIMD"
+ "rev<REVERSE:rev_op>\\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "type" "neon_rev<q>")]
+)
+
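
Illustration, not part of the patch: once the pattern above exists, aarch64_evpc_rev (added below) can map a generic single-vector shuffle whose mask reverses lanes within a fixed-width container onto a single REV instruction. A minimal sketch using GCC's vector extensions; the function name is hypothetical:

    #include <arm_neon.h>

    /* Swap adjacent 16-bit lanes within each 32-bit word.  With the REV
       support in place this should compile to "rev32 v0.8h, v0.8h"
       rather than falling back to an indexed TBL.  */
    uint16x8_t
    swap_halfword_pairs (uint16x8_t x)
    {
      return __builtin_shuffle (x, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
    }
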
(define_insn "aarch64_st2<mode>_dreg"
[(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
(unspec:TI [(match_operand:OI 1 "register_operand" "w")
return true;
}
+/* Recognize patterns for the REV insns. */
+
+static bool
+aarch64_evpc_rev (struct expand_vec_perm_d *d)
+{
+ unsigned int i, j, diff, nelt = d->nelt;
+ rtx (*gen) (rtx, rtx);
+
+ if (!d->one_vector_p)
+ return false;
+
+ diff = d->perm[0];
+ switch (diff)
+ {
+ case 7:
+ switch (d->vmode)
+ {
+ case V16QImode: gen = gen_aarch64_rev64v16qi; break;
+ case V8QImode: gen = gen_aarch64_rev64v8qi; break;
+ default:
+ return false;
+ }
+ break;
+ case 3:
+ switch (d->vmode)
+ {
+ case V16QImode: gen = gen_aarch64_rev32v16qi; break;
+ case V8QImode: gen = gen_aarch64_rev32v8qi; break;
+ case V8HImode: gen = gen_aarch64_rev64v8hi; break;
+ case V4HImode: gen = gen_aarch64_rev64v4hi; break;
+ default:
+ return false;
+ }
+ break;
+ case 1:
+ switch (d->vmode)
+ {
+ case V16QImode: gen = gen_aarch64_rev16v16qi; break;
+ case V8QImode: gen = gen_aarch64_rev16v8qi; break;
+ case V8HImode: gen = gen_aarch64_rev32v8hi; break;
+ case V4HImode: gen = gen_aarch64_rev32v4hi; break;
+ case V4SImode: gen = gen_aarch64_rev64v4si; break;
+ case V2SImode: gen = gen_aarch64_rev64v2si; break;
+ case V4SFmode: gen = gen_aarch64_rev64v4sf; break;
+ case V2SFmode: gen = gen_aarch64_rev64v2sf; break;
+ default:
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ for (i = 0; i < nelt; i += diff + 1)
+ for (j = 0; j <= diff; j += 1)
+ {
+ /* This is guaranteed to be true, as diff is 7, 3 or 1
+ and there are enough elements in the queue to cover each
+ block of diff + 1 lanes. A vector mask with any other
+ value of diff implies that something has gone wrong by
+ the time we get here. */
+ gcc_assert (i + j < nelt);
+ if (d->perm[i + j] != i + diff - j)
+ return false;
+ }
+
+ /* Success! */
+ if (d->testing_p)
+ return true;
+
+ emit_insn (gen (d->target, d->op0));
+ return true;
+}
+
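
To make the validation loop above concrete, here is a standalone sketch (a hypothetical harness, not part of the patch) of the same check applied to a rev64-style mask on an 8-lane vector, where diff == 3 and every block of diff + 1 lanes must be reversed in place:

    #include <assert.h>

    int
    main (void)
    {
      /* d->perm for REV64 on V8HI: each 4-lane block is reversed.  */
      unsigned int perm[8] = { 3, 2, 1, 0, 7, 6, 5, 4 };
      unsigned int i, j, diff = perm[0], nelt = 8;

      for (i = 0; i < nelt; i += diff + 1)
        for (j = 0; j <= diff; j++)
          assert (perm[i + j] == i + diff - j);
      return 0;
    }
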
static bool
aarch64_evpc_dup (struct expand_vec_perm_d *d)
{
if (TARGET_SIMD)
{
- if (aarch64_evpc_ext (d))
+ if (aarch64_evpc_rev (d))
+ return true;
+ else if (aarch64_evpc_ext (d))
return true;
else if (aarch64_evpc_zip (d))
return true;
return result;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vrev16_p8 (poly8x8_t a)
-{
- poly8x8_t result;
- __asm__ ("rev16 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vrev16_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("rev16 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vrev16_u8 (uint8x8_t a)
-{
- uint8x8_t result;
- __asm__ ("rev16 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vrev16q_p8 (poly8x16_t a)
-{
- poly8x16_t result;
- __asm__ ("rev16 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vrev16q_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("rev16 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vrev16q_u8 (uint8x16_t a)
-{
- uint8x16_t result;
- __asm__ ("rev16 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vrev32_p8 (poly8x8_t a)
-{
- poly8x8_t result;
- __asm__ ("rev32 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vrev32_p16 (poly16x4_t a)
-{
- poly16x4_t result;
- __asm__ ("rev32 %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vrev32_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("rev32 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vrev32_s16 (int16x4_t a)
-{
- int16x4_t result;
- __asm__ ("rev32 %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vrev32_u8 (uint8x8_t a)
-{
- uint8x8_t result;
- __asm__ ("rev32 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vrev32_u16 (uint16x4_t a)
-{
- uint16x4_t result;
- __asm__ ("rev32 %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vrev32q_p8 (poly8x16_t a)
-{
- poly8x16_t result;
- __asm__ ("rev32 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vrev32q_p16 (poly16x8_t a)
-{
- poly16x8_t result;
- __asm__ ("rev32 %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vrev32q_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("rev32 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vrev32q_s16 (int16x8_t a)
-{
- int16x8_t result;
- __asm__ ("rev32 %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vrev32q_u8 (uint8x16_t a)
-{
- uint8x16_t result;
- __asm__ ("rev32 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vrev32q_u16 (uint16x8_t a)
-{
- uint16x8_t result;
- __asm__ ("rev32 %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrev64_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("rev64 %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vrev64_p8 (poly8x8_t a)
-{
- poly8x8_t result;
- __asm__ ("rev64 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vrev64_p16 (poly16x4_t a)
-{
- poly16x4_t result;
- __asm__ ("rev64 %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vrev64_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("rev64 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vrev64_s16 (int16x4_t a)
-{
- int16x4_t result;
- __asm__ ("rev64 %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vrev64_s32 (int32x2_t a)
-{
- int32x2_t result;
- __asm__ ("rev64 %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vrev64_u8 (uint8x8_t a)
-{
- uint8x8_t result;
- __asm__ ("rev64 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vrev64_u16 (uint16x4_t a)
-{
- uint16x4_t result;
- __asm__ ("rev64 %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vrev64_u32 (uint32x2_t a)
-{
- uint32x2_t result;
- __asm__ ("rev64 %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrev64q_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("rev64 %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vrev64q_p8 (poly8x16_t a)
-{
- poly8x16_t result;
- __asm__ ("rev64 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vrev64q_p16 (poly16x8_t a)
-{
- poly16x8_t result;
- __asm__ ("rev64 %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vrev64q_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("rev64 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vrev64q_s16 (int16x8_t a)
-{
- int16x8_t result;
- __asm__ ("rev64 %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vrev64q_s32 (int32x4_t a)
-{
- int32x4_t result;
- __asm__ ("rev64 %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vrev64q_u8 (uint8x16_t a)
-{
- uint8x16_t result;
- __asm__ ("rev64 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vrev64q_u16 (uint16x8_t a)
-{
- uint16x8_t result;
- __asm__ ("rev64 %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vrev64q_u32 (uint32x4_t a)
-{
- uint32x4_t result;
- __asm__ ("rev64 %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
#define vrshrn_high_n_s16(a, b, c) \
__extension__ \
({ \
return __builtin_aarch64_frecpxdf (__a);
}
+
+/* vrev */
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev16_p8 (poly8x8_t a)
+{
+ return __builtin_shuffle (a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev16_s8 (int8x8_t a)
+{
+ return __builtin_shuffle (a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev16_u8 (uint8x8_t a)
+{
+ return __builtin_shuffle (a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev16q_p8 (poly8x16_t a)
+{
+ return __builtin_shuffle (a,
+ (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev16q_s8 (int8x16_t a)
+{
+ return __builtin_shuffle (a,
+ (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev16q_u8 (uint8x16_t a)
+{
+ return __builtin_shuffle (a,
+ (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev32_p8 (poly8x8_t a)
+{
+ return __builtin_shuffle (a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev32_p16 (poly16x4_t a)
+{
+ return __builtin_shuffle (a, (uint16x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev32_s8 (int8x8_t a)
+{
+ return __builtin_shuffle (a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev32_s16 (int16x4_t a)
+{
+ return __builtin_shuffle (a, (uint16x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev32_u8 (uint8x8_t a)
+{
+ return __builtin_shuffle (a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev32_u16 (uint16x4_t a)
+{
+ return __builtin_shuffle (a, (uint16x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev32q_p8 (poly8x16_t a)
+{
+ return __builtin_shuffle (a,
+ (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev32q_p16 (poly16x8_t a)
+{
+ return __builtin_shuffle (a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev32q_s8 (int8x16_t a)
+{
+ return __builtin_shuffle (a,
+ (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev32q_s16 (int16x8_t a)
+{
+ return __builtin_shuffle (a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev32q_u8 (uint8x16_t a)
+{
+ return __builtin_shuffle (a,
+ (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev32q_u16 (uint16x8_t a)
+{
+ return __builtin_shuffle (a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrev64_f32 (float32x2_t a)
+{
+ return __builtin_shuffle (a, (uint32x2_t) { 1, 0 });
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev64_p8 (poly8x8_t a)
+{
+ return __builtin_shuffle (a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev64_p16 (poly16x4_t a)
+{
+ return __builtin_shuffle (a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev64_s8 (int8x8_t a)
+{
+ return __builtin_shuffle (a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev64_s16 (int16x4_t a)
+{
+ return __builtin_shuffle (a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrev64_s32 (int32x2_t a)
+{
+ return __builtin_shuffle (a, (uint32x2_t) { 1, 0 });
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev64_u8 (uint8x8_t a)
+{
+ return __builtin_shuffle (a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev64_u16 (uint16x4_t a)
+{
+ return __builtin_shuffle (a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrev64_u32 (uint32x2_t a)
+{
+ return __builtin_shuffle (a, (uint32x2_t) { 1, 0 });
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrev64q_f32 (float32x4_t a)
+{
+ return __builtin_shuffle (a, (uint32x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev64q_p8 (poly8x16_t a)
+{
+ return __builtin_shuffle (a,
+ (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev64q_p16 (poly16x8_t a)
+{
+ return __builtin_shuffle (a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev64q_s8 (int8x16_t a)
+{
+ return __builtin_shuffle (a,
+ (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev64q_s16 (int16x8_t a)
+{
+ return __builtin_shuffle (a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrev64q_s32 (int32x4_t a)
+{
+ return __builtin_shuffle (a, (uint32x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev64q_u8 (uint8x16_t a)
+{
+ return __builtin_shuffle (a,
+ (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev64q_u16 (uint16x8_t a)
+{
+ return __builtin_shuffle (a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrev64q_u32 (uint32x4_t a)
+{
+ return __builtin_shuffle (a, (uint32x4_t) { 1, 0, 3, 2 });
+}
+
/* vrnd */
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
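
A note on why the __builtin_shuffle rewrite matters, with a sketch that is not part of the patch: unlike the old opaque __asm__ blocks, shuffles are visible to the middle end, so the optimizers are free to combine or cancel them:

    #include <arm_neon.h>

    uint8x8_t
    double_rev16 (uint8x8_t x)
    {
      /* rev16 applied twice is the identity; expressed as shuffles the
         pair can be folded away, which the __asm__ forms prevented.  */
      return vrev16_u8 (vrev16_u8 (x));
    }
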
UNSPEC_TRN1 ; Used in vector permute patterns.
UNSPEC_TRN2 ; Used in vector permute patterns.
UNSPEC_EXT ; Used in aarch64-simd.md.
+ UNSPEC_REV64 ; Used in vector reverse patterns (permute).
+ UNSPEC_REV32 ; Used in vector reverse patterns (permute).
+ UNSPEC_REV16 ; Used in vector reverse patterns (permute).
UNSPEC_AESE ; Used in aarch64-simd.md.
UNSPEC_AESD ; Used in aarch64-simd.md.
UNSPEC_AESMC ; Used in aarch64-simd.md.
UNSPEC_TRN1 UNSPEC_TRN2
UNSPEC_UZP1 UNSPEC_UZP2])
+(define_int_iterator REVERSE [UNSPEC_REV64 UNSPEC_REV32 UNSPEC_REV16])
+
(define_int_iterator FRINT [UNSPEC_FRINTZ UNSPEC_FRINTP UNSPEC_FRINTM
UNSPEC_FRINTN UNSPEC_FRINTI UNSPEC_FRINTX
UNSPEC_FRINTA])
(UNSPEC_TRN1 "trn") (UNSPEC_TRN2 "trn")
(UNSPEC_UZP1 "uzp") (UNSPEC_UZP2 "uzp")])
+; Op code suffix for REV instructions (the size of the container within
+; which elements are reversed).
+(define_int_attr rev_op [(UNSPEC_REV64 "64") (UNSPEC_REV32 "32")
+ (UNSPEC_REV16 "16")])
+
(define_int_attr perm_hilo [(UNSPEC_ZIP1 "1") (UNSPEC_ZIP2 "2")
(UNSPEC_TRN1 "1") (UNSPEC_TRN2 "2")
(UNSPEC_UZP1 "1") (UNSPEC_UZP2 "2")])
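
For reference, a few of the insn names and templates that iterating REVERSE over VALL produces in the aarch64_rev pattern, matching the gen_* functions called from aarch64_evpc_rev (illustrative, not exhaustive):

    aarch64_rev16v8qi  ->  "rev16\t%0.8b, %1.8b"
    aarch64_rev32v4hi  ->  "rev32\t%0.4h, %1.4h"
    aarch64_rev64v4si  ->  "rev64\t%0.4s, %1.4s"
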
+2014-07-17 Yvan Roux <yvan.roux@linaro.org>
+
+ Backport from trunk r210153.
+ 2014-05-07 Alan Lawrence <alan.lawrence@arm.com>
+
+ * gcc.target/aarch64/simd/vrev16p8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev16p8.x: New file.
+ * gcc.target/aarch64/simd/vrev16qp8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev16qp8.x: New file.
+ * gcc.target/aarch64/simd/vrev16qs8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev16qs8.x: New file.
+ * gcc.target/aarch64/simd/vrev16qu8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev16qu8.x: New file.
+ * gcc.target/aarch64/simd/vrev16s8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev16s8.x: New file.
+ * gcc.target/aarch64/simd/vrev16u8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev16u8.x: New file.
+ * gcc.target/aarch64/simd/vrev32p16_1.c: New file.
+ * gcc.target/aarch64/simd/vrev32p16.x: New file.
+ * gcc.target/aarch64/simd/vrev32p8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev32p8.x: New file.
+ * gcc.target/aarch64/simd/vrev32qp16_1.c: New file.
+ * gcc.target/aarch64/simd/vrev32qp16.x: New file.
+ * gcc.target/aarch64/simd/vrev32qp8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev32qp8.x: New file.
+ * gcc.target/aarch64/simd/vrev32qs16_1.c: New file.
+ * gcc.target/aarch64/simd/vrev32qs16.x: New file.
+ * gcc.target/aarch64/simd/vrev32qs8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev32qs8.x: New file.
+ * gcc.target/aarch64/simd/vrev32qu16_1.c: New file.
+ * gcc.target/aarch64/simd/vrev32qu16.x: New file.
+ * gcc.target/aarch64/simd/vrev32qu8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev32qu8.x: New file.
+ * gcc.target/aarch64/simd/vrev32s16_1.c: New file.
+ * gcc.target/aarch64/simd/vrev32s16.x: New file.
+ * gcc.target/aarch64/simd/vrev32s8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev32s8.x: New file.
+ * gcc.target/aarch64/simd/vrev32u16_1.c: New file.
+ * gcc.target/aarch64/simd/vrev32u16.x: New file.
+ * gcc.target/aarch64/simd/vrev32u8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev32u8.x: New file.
+ * gcc.target/aarch64/simd/vrev64f32_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64f32.x: New file.
+ * gcc.target/aarch64/simd/vrev64p16_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64p16.x: New file.
+ * gcc.target/aarch64/simd/vrev64p8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64p8.x: New file.
+ * gcc.target/aarch64/simd/vrev64qf32_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64qf32.x: New file.
+ * gcc.target/aarch64/simd/vrev64qp16_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64qp16.x: New file.
+ * gcc.target/aarch64/simd/vrev64qp8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64qp8.x: New file.
+ * gcc.target/aarch64/simd/vrev64qs16_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64qs16.x: New file.
+ * gcc.target/aarch64/simd/vrev64qs32_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64qs32.x: New file.
+ * gcc.target/aarch64/simd/vrev64qs8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64qs8.x: New file.
+ * gcc.target/aarch64/simd/vrev64qu16_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64qu16.x: New file.
+ * gcc.target/aarch64/simd/vrev64qu32_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64qu32.x: New file.
+ * gcc.target/aarch64/simd/vrev64qu8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64qu8.x: New file.
+ * gcc.target/aarch64/simd/vrev64s16_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64s16.x: New file.
+ * gcc.target/aarch64/simd/vrev64s32_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64s32.x: New file.
+ * gcc.target/aarch64/simd/vrev64s8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64s8.x: New file.
+ * gcc.target/aarch64/simd/vrev64u16_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64u16.x: New file.
+ * gcc.target/aarch64/simd/vrev64u32_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64u32.x: New file.
+ * gcc.target/aarch64/simd/vrev64u8_1.c: New file.
+ * gcc.target/aarch64/simd/vrev64u8.x: New file.
+
2014-07-16 Yvan Roux <yvan.roux@linaro.org>
Backport from trunk r210148, r210151, r210422.
--- /dev/null
+extern void abort (void);
+
+poly8x8_t
+test_vrev16p8 (poly8x8_t _arg)
+{
+ return vrev16_p8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly8x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ poly8x8_t reversed = test_vrev16p8 (inorder);
+ poly8x8_t expected = {2, 1, 4, 3, 6, 5, 8, 7};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev16_p8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev16p8.x"
+
+/* { dg-final { scan-assembler-times "rev16\[ \t\]+v\[0-9\]+.8b, ?v\[0-9\]+.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
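
The remaining test files repeat this pairing: each .x file holds the shared body, a wrapper around one intrinsic plus a main that compares every lane of the reversed vector against the expected order, and the matching _1.c driver includes it under -save-temps -fno-inline and uses scan-assembler-times to require exactly one REV instruction of the expected width and lane arrangement.
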
--- /dev/null
+extern void abort (void);
+
+poly8x16_t
+test_vrev16qp8 (poly8x16_t _arg)
+{
+ return vrev16q_p8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly8x16_t inorder = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ poly8x16_t reversed = test_vrev16qp8 (inorder);
+ poly8x16_t expected = {2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15};
+
+ for (i = 0; i < 16; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev16q_p8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev16qp8.x"
+
+/* { dg-final { scan-assembler-times "rev16\[ \t\]+v\[0-9\]+.16b, ?v\[0-9\]+.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+int8x16_t
+test_vrev16qs8 (int8x16_t _arg)
+{
+ return vrev16q_s8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int8x16_t inorder = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ int8x16_t reversed = test_vrev16qs8 (inorder);
+ int8x16_t expected = {2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15};
+
+ for (i = 0; i < 16; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev16q_s8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev16qs8.x"
+
+/* { dg-final { scan-assembler-times "rev16\[ \t\]+v\[0-9\]+.16b, ?v\[0-9\]+.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+uint8x16_t
+test_vrev16qu8 (uint8x16_t _arg)
+{
+ return vrev16q_u8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint8x16_t inorder = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ uint8x16_t reversed = test_vrev16qu8 (inorder);
+ uint8x16_t expected = {2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15};
+
+ for (i = 0; i < 16; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev16q_u8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev16qu8.x"
+
+/* { dg-final { scan-assembler-times "rev16\[ \t\]+v\[0-9\]+.16b, ?v\[0-9\]+.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+int8x8_t
+test_vrev16s8 (int8x8_t _arg)
+{
+ return vrev16_s8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int8x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ int8x8_t reversed = test_vrev16s8 (inorder);
+ int8x8_t expected = {2, 1, 4, 3, 6, 5, 8, 7};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev16_s8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev16s8.x"
+
+/* { dg-final { scan-assembler-times "rev16\[ \t\]+v\[0-9\]+.8b, ?v\[0-9\]+.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+uint8x8_t
+test_vrev16u8 (uint8x8_t _arg)
+{
+ return vrev16_u8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint8x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ uint8x8_t reversed = test_vrev16u8 (inorder);
+ uint8x8_t expected = {2, 1, 4, 3, 6, 5, 8, 7};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev16_u8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev16u8.x"
+
+/* { dg-final { scan-assembler-times "rev16\[ \t\]+v\[0-9\]+.8b, ?v\[0-9\]+.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+poly16x4_t
+test_vrev32p16 (poly16x4_t _arg)
+{
+ return vrev32_p16 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly16x4_t inorder = {1, 2, 3, 4};
+ poly16x4_t reversed = test_vrev32p16 (inorder);
+ poly16x4_t expected = {2, 1, 4, 3};
+
+ for (i = 0; i < 4; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev32_p16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev32p16.x"
+
+/* { dg-final { scan-assembler-times "rev32\[ \t\]+v\[0-9\]+.4h, ?v\[0-9\]+.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+poly8x8_t
+test_vrev32p8 (poly8x8_t _arg)
+{
+ return vrev32_p8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly8x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ poly8x8_t reversed = test_vrev32p8 (inorder);
+ poly8x8_t expected = {4, 3, 2, 1, 8, 7, 6, 5};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev32_p8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev32p8.x"
+
+/* { dg-final { scan-assembler-times "rev32\[ \t\]+v\[0-9\]+.8b, ?v\[0-9\]+.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+poly16x8_t
+test_vrev32qp16 (poly16x8_t _arg)
+{
+ return vrev32q_p16 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly16x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ poly16x8_t reversed = test_vrev32qp16 (inorder);
+ poly16x8_t expected = {2, 1, 4, 3, 6, 5, 8, 7};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev32q_p16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev32qp16.x"
+
+/* { dg-final { scan-assembler-times "rev32\[ \t\]+v\[0-9\]+.8h, ?v\[0-9\]+.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+poly8x16_t
+test_vrev32qp8 (poly8x16_t _arg)
+{
+ return vrev32q_p8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly8x16_t inorder = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ poly8x16_t reversed = test_vrev32qp8 (inorder);
+ poly8x16_t expected = {4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13};
+
+ for (i = 0; i < 16; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev32q_p8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev32qp8.x"
+
+/* { dg-final { scan-assembler-times "rev32\[ \t\]+v\[0-9\]+.16b, ?v\[0-9\]+.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+int16x8_t
+test_vrev32qs16 (int16x8_t _arg)
+{
+ return vrev32q_s16 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int16x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ int16x8_t reversed = test_vrev32qs16 (inorder);
+ int16x8_t expected = {2, 1, 4, 3, 6, 5, 8, 7};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev32q_s16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev32qs16.x"
+
+/* { dg-final { scan-assembler-times "rev32\[ \t\]+v\[0-9\]+.8h, ?v\[0-9\]+.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+int8x16_t
+test_vrev32qs8 (int8x16_t _arg)
+{
+ return vrev32q_s8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int8x16_t inorder = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ int8x16_t reversed = test_vrev32qs8 (inorder);
+ int8x16_t expected = {4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13};
+
+ for (i = 0; i < 16; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev32q_s8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev32qs8.x"
+
+/* { dg-final { scan-assembler-times "rev32\[ \t\]+v\[0-9\]+.16b, ?v\[0-9\]+.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+uint16x8_t
+test_vrev32qu16 (uint16x8_t _arg)
+{
+ return vrev32q_u16 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint16x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ uint16x8_t reversed = test_vrev32qu16 (inorder);
+ uint16x8_t expected = {2, 1, 4, 3, 6, 5, 8, 7};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev32q_u16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev32qu16.x"
+
+/* { dg-final { scan-assembler-times "rev32\[ \t\]+v\[0-9\]+.8h, ?v\[0-9\]+.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+uint8x16_t
+test_vrev32qu8 (uint8x16_t _arg)
+{
+ return vrev32q_u8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint8x16_t inorder = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ uint8x16_t reversed = test_vrev32qu8 (inorder);
+ uint8x16_t expected = {4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13};
+
+ for (i = 0; i < 16; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev32q_u8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev32qu8.x"
+
+/* { dg-final { scan-assembler-times "rev32\[ \t\]+v\[0-9\]+.16b, ?v\[0-9\]+.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+int16x4_t
+test_vrev32s16 (int16x4_t _arg)
+{
+ return vrev32_s16 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int16x4_t inorder = {1, 2, 3, 4};
+ int16x4_t reversed = test_vrev32s16 (inorder);
+ int16x4_t expected = {2, 1, 4, 3};
+
+ for (i = 0; i < 4; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev32_s16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev32s16.x"
+
+/* { dg-final { scan-assembler-times "rev32\[ \t\]+v\[0-9\]+.4h, ?v\[0-9\]+.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+int8x8_t
+test_vrev32s8 (int8x8_t _arg)
+{
+ return vrev32_s8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int8x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ int8x8_t reversed = test_vrev32s8 (inorder);
+ int8x8_t expected = {4, 3, 2, 1, 8, 7, 6, 5};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev32_s8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev32s8.x"
+
+/* { dg-final { scan-assembler-times "rev32\[ \t\]+v\[0-9\]+.8b, ?v\[0-9\]+.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+uint16x4_t
+test_vrev32u16 (uint16x4_t _arg)
+{
+ return vrev32_u16 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint16x4_t inorder = {1, 2, 3, 4};
+ uint16x4_t reversed = test_vrev32u16 (inorder);
+ uint16x4_t expected = {2, 1, 4, 3};
+
+ for (i = 0; i < 4; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev32_u16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev32u16.x"
+
+/* { dg-final { scan-assembler-times "rev32\[ \t\]+v\[0-9\]+.4h, ?v\[0-9\]+.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+uint8x8_t
+test_vrev32u8 (uint8x8_t _arg)
+{
+ return vrev32_u8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint8x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ uint8x8_t reversed = test_vrev32u8 (inorder);
+ uint8x8_t expected = {4, 3, 2, 1, 8, 7, 6, 5};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev32_u8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev32u8.x"
+
+/* { dg-final { scan-assembler-times "rev32\[ \t\]+v\[0-9\]+.8b, ?v\[0-9\]+.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+float32x2_t
+test_vrev64f32 (float32x2_t _arg)
+{
+ return vrev64_f32 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ float32x2_t inorder = {1, 2};
+ float32x2_t reversed = test_vrev64f32 (inorder);
+ float32x2_t expected = {2, 1};
+
+ for (i = 0; i < 2; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64_f32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64f32.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.2s, ?v\[0-9\]+.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+poly16x4_t
+test_vrev64p16 (poly16x4_t _arg)
+{
+ return vrev64_p16 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly16x4_t inorder = {1, 2, 3, 4};
+ poly16x4_t reversed = test_vrev64p16 (inorder);
+ poly16x4_t expected = {4, 3, 2, 1};
+
+ for (i = 0; i < 4; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64_p16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64p16.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.4h, ?v\[0-9\]+.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+poly8x8_t
+test_vrev64p8 (poly8x8_t _arg)
+{
+ return vrev64_p8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly8x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ poly8x8_t reversed = test_vrev64p8 (inorder);
+ poly8x8_t expected = {8, 7, 6, 5, 4, 3, 2, 1};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64_p8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64p8.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.8b, ?v\[0-9\]+.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+float32x4_t
+test_vrev64qf32 (float32x4_t _arg)
+{
+ return vrev64q_f32 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ float32x4_t inorder = {1, 2, 3, 4};
+ float32x4_t reversed = test_vrev64qf32 (inorder);
+ float32x4_t expected = {2, 1, 4, 3};
+
+ for (i = 0; i < 4; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64q_f32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64qf32.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.4s, ?v\[0-9\]+.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+poly16x8_t
+test_vrev64qp16 (poly16x8_t _arg)
+{
+ return vrev64q_p16 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly16x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ poly16x8_t reversed = test_vrev64qp16 (inorder);
+ poly16x8_t expected = {4, 3, 2, 1, 8, 7, 6, 5};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64q_p16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64qp16.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.8h, ?v\[0-9\]+.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+poly8x16_t
+test_vrev64qp8 (poly8x16_t _arg)
+{
+ return vrev64q_p8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly8x16_t inorder = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ poly8x16_t reversed = test_vrev64qp8 (inorder);
+ poly8x16_t expected = {8, 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, 9};
+
+ for (i = 0; i < 16; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64q_p8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64qp8.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.16b, ?v\[0-9\]+.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+int16x8_t
+test_vrev64qs16 (int16x8_t _arg)
+{
+ return vrev64q_s16 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int16x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ int16x8_t reversed = test_vrev64qs16 (inorder);
+ int16x8_t expected = {4, 3, 2, 1, 8, 7, 6, 5};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64q_s16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64qs16.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.8h, ?v\[0-9\]+.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+int32x4_t
+test_vrev64qs32 (int32x4_t _arg)
+{
+ return vrev64q_s32 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int32x4_t inorder = {1, 2, 3, 4};
+ int32x4_t reversed = test_vrev64qs32 (inorder);
+ int32x4_t expected = {2, 1, 4, 3};
+
+ for (i = 0; i < 4; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64q_s32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64qs32.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.4s, ?v\[0-9\]+.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+int8x16_t
+test_vrev64qs8 (int8x16_t _arg)
+{
+ return vrev64q_s8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int8x16_t inorder = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ int8x16_t reversed = test_vrev64qs8 (inorder);
+ int8x16_t expected = {8, 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, 9};
+
+ for (i = 0; i < 16; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64q_s8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64qs8.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.16b, ?v\[0-9\]+.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+uint16x8_t
+test_vrev64qu16 (uint16x8_t _arg)
+{
+ return vrev64q_u16 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint16x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ uint16x8_t reversed = test_vrev64qu16 (inorder);
+ uint16x8_t expected = {4, 3, 2, 1, 8, 7, 6, 5};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64q_u16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64qu16.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.8h, ?v\[0-9\]+.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+uint32x4_t
+test_vrev64qu32 (uint32x4_t _arg)
+{
+ return vrev64q_u32 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint32x4_t inorder = {1, 2, 3, 4};
+ uint32x4_t reversed = test_vrev64qu32 (inorder);
+ uint32x4_t expected = {2, 1, 4, 3};
+
+ for (i = 0; i < 4; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64q_u32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64qu32.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.4s, ?v\[0-9\]+.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+uint8x16_t
+test_vrev64qu8 (uint8x16_t _arg)
+{
+ return vrev64q_u8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint8x16_t inorder = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ uint8x16_t reversed = test_vrev64qu8 (inorder);
+ uint8x16_t expected = {8, 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, 9};
+
+ for (i = 0; i < 16; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64q_u8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64qu8.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.16b, ?v\[0-9\]+.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+int16x4_t
+test_vrev64s16 (int16x4_t _arg)
+{
+ return vrev64_s16 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int16x4_t inorder = {1, 2, 3, 4};
+ int16x4_t reversed = test_vrev64s16 (inorder);
+ int16x4_t expected = {4, 3, 2, 1};
+
+ for (i = 0; i < 4; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64_s16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64s16.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.4h, ?v\[0-9\]+.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+int32x2_t
+test_vrev64s32 (int32x2_t _arg)
+{
+ return vrev64_s32 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int32x2_t inorder = {1, 2};
+ int32x2_t reversed = test_vrev64s32 (inorder);
+ int32x2_t expected = {2, 1};
+
+ for (i = 0; i < 2; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64_s32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64s32.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.2s, ?v\[0-9\]+.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+int8x8_t
+test_vrev64s8 (int8x8_t _arg)
+{
+ return vrev64_s8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int8x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ int8x8_t reversed = test_vrev64s8 (inorder);
+ int8x8_t expected = {8, 7, 6, 5, 4, 3, 2, 1};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64_s8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64s8.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.8b, ?v\[0-9\]+.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+uint16x4_t
+test_vrev64u16 (uint16x4_t _arg)
+{
+ return vrev64_u16 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint16x4_t inorder = {1, 2, 3, 4};
+ uint16x4_t reversed = test_vrev64u16 (inorder);
+ uint16x4_t expected = {4, 3, 2, 1};
+
+ for (i = 0; i < 4; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64_u16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64u16.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.4h, ?v\[0-9\]+.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+uint32x2_t
+test_vrev64u32 (uint32x2_t _arg)
+{
+ return vrev64_u32 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint32x2_t inorder = {1, 2};
+ uint32x2_t reversed = test_vrev64u32 (inorder);
+ uint32x2_t expected = {2, 1};
+
+ for (i = 0; i < 2; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64_u32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64u32.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.2s, ?v\[0-9\]+.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
--- /dev/null
+extern void abort (void);
+
+uint8x8_t
+test_vrev64u8 (uint8x8_t _arg)
+{
+ return vrev64_u8 (_arg);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint8x8_t inorder = {1, 2, 3, 4, 5, 6, 7, 8};
+ uint8x8_t reversed = test_vrev64u8 (inorder);
+ uint8x8_t expected = {8, 7, 6, 5, 4, 3, 2, 1};
+
+ for (i = 0; i < 8; i++)
+ if (reversed[i] != expected[i])
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Test the `vrev64_u8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vrev64u8.x"
+
+/* { dg-final { scan-assembler-times "rev64\[ \t\]+v\[0-9\]+.8b, ?v\[0-9\]+.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */