From 850e5878f8f57244571858f604f28da646be498d Mon Sep 17 00:00:00 2001
From: Kyrylo Tkachov <kyrylo.tkachov@arm.com>
Date: Mon, 1 Feb 2021 21:10:35 +0000
Subject: [PATCH] aarch64: Reimplement vrshrn* intrinsics using builtins

This patch moves the vrshrn* intrinsics to builtins away from inline asm.
It's a bit of code, but it's very similar to the recent vshrn*
reimplementation except that we use an unspec rather than standard RTL
codes for the functionality.

gcc/ChangeLog:

	* config/aarch64/aarch64-simd-builtins.def (rshrn, rshrn2):
	Define builtins.
	* config/aarch64/aarch64-simd.md (aarch64_rshrn<mode>_insn_le):
	Define.
	(aarch64_rshrn<mode>_insn_be): Likewise.
	(aarch64_rshrn<mode>): Likewise.
	(aarch64_rshrn2<mode>_insn_le): Likewise.
	(aarch64_rshrn2<mode>_insn_be): Likewise.
	(aarch64_rshrn2<mode>): Likewise.
	* config/aarch64/aarch64.md (unspec): Add UNSPEC_RSHRN.
	* config/aarch64/arm_neon.h (vrshrn_high_n_s16): Reimplement
	using builtin.
	(vrshrn_high_n_s32): Likewise.
	(vrshrn_high_n_s64): Likewise.
	(vrshrn_high_n_u16): Likewise.
	(vrshrn_high_n_u32): Likewise.
	(vrshrn_high_n_u64): Likewise.
	(vrshrn_n_s16): Likewise.
	(vrshrn_n_s32): Likewise.
	(vrshrn_n_s64): Likewise.
	(vrshrn_n_u16): Likewise.
	(vrshrn_n_u32): Likewise.
	(vrshrn_n_u64): Likewise.

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/narrow_high-intrinsics.c: Adjust rshrn2
	assembly scan.
---
 gcc/config/aarch64/aarch64-simd-builtins.def       |   6 +
 gcc/config/aarch64/aarch64-simd.md                 |  88 ++++++++
 gcc/config/aarch64/aarch64.md                      |   1 +
 gcc/config/aarch64/arm_neon.h                      | 225 +++++++--------------
 .../gcc.target/aarch64/narrow_high-intrinsics.c    |   2 +-
 5 files changed, 171 insertions(+), 151 deletions(-)
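Note on the semantics modelled by UNSPEC_RSHRN: unlike the plain vshrn*
patterns, which are expressible as a truncate of a shift, the rounding
variant adds a bias of 1 << (shift - 1) to each lane before shifting and
then truncates to the narrow element type, so there is no single standard
RTL code for it. The scalar sketch below (illustration only, not part of
the patch; the function name is made up for the example) shows the
per-lane operation for the 16-bit to 8-bit case:

#include <stdint.h>
#include <stdio.h>

/* Illustration only, not GCC source: one lane of RSHRN for the
   16-bit -> 8-bit case.  Add the rounding bias 1 << (n - 1), shift
   right by n, then truncate to the narrow type.  */
static int8_t
rshrn_lane_s16 (int16_t x, int n)	/* valid shift counts: 1..8 */
{
  int32_t biased = (int32_t) x + (1 << (n - 1));	/* widen so the bias cannot overflow */
  return (int8_t) (biased >> n);
}

int
main (void)
{
  /* 255 >> 4 truncates to 15; the rounding bias bumps it to 16,
     matching vrshrn_n_s16 with shift 4 on a vector of 255s.  */
  printf ("%d\n", rshrn_lane_s16 (255, 4));
  return 0;
}

Because the narrowing is a truncation, the signed and unsigned forms are
bit-identical, which is why the unsigned intrinsics in arm_neon.h below
simply cast through the signed builtins.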
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 3115b73..aa84815 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -214,6 +214,12 @@
   /* Implemented by aarch64_shrn2<mode>.  */
   BUILTIN_VQN (SHIFTACC, shrn2, 0, NONE)
 
+  /* Implemented by aarch64_rshrn<mode>.  */
+  BUILTIN_VQN (SHIFTIMM, rshrn, 0, NONE)
+
+  /* Implemented by aarch64_rshrn2<mode>.  */
+  BUILTIN_VQN (SHIFTACC, rshrn2, 0, NONE)
+
   /* Implemented by aarch64_<sur>mlsl<mode>.  */
   BUILTIN_VD_BHSI (TERNOP, smlsl, 0, NONE)
   BUILTIN_VD_BHSI (TERNOPU, umlsl, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 1d790f2..992c7b8 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1752,6 +1752,54 @@
   }
 )
 
+(define_insn "aarch64_rshrn<mode>_insn_le"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+	(vec_concat:<VNARROWQ2>
+	  (unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w")
+			      (match_operand:VQN 2 "aarch64_simd_rshift_imm")
+			     ] UNSPEC_RSHRN)
+	  (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")))]
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "rshrn\\t%0.<Vntype>, %1.<Vtype>, %2"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_rshrn<mode>_insn_be"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+	(vec_concat:<VNARROWQ2>
+	  (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")
+	  (unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w")
+			      (match_operand:VQN 2 "aarch64_simd_rshift_imm")
+			     ] UNSPEC_RSHRN)))]
+  "TARGET_SIMD && BYTES_BIG_ENDIAN"
+  "rshrn\\t%0.<Vntype>, %1.<Vtype>, %2"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_rshrn<mode>"
+  [(match_operand:<VNARROWQ> 0 "register_operand")
+   (match_operand:VQN 1 "register_operand")
+   (match_operand:SI 2 "aarch64_simd_shift_imm_offset_<ve_mode>")]
+  "TARGET_SIMD"
+  {
+    operands[2] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+						     INTVAL (operands[2]));
+    rtx tmp = gen_reg_rtx (<VNARROWQ2>mode);
+    if (BYTES_BIG_ENDIAN)
+      emit_insn (gen_aarch64_rshrn<mode>_insn_be (tmp, operands[1],
+				operands[2], CONST0_RTX (<VNARROWQ>mode)));
+    else
+      emit_insn (gen_aarch64_rshrn<mode>_insn_le (tmp, operands[1],
+				operands[2], CONST0_RTX (<VNARROWQ>mode)));
+
+    /* The intrinsic expects a narrow result, so emit a subreg that will get
+       optimized away as appropriate.  */
+    emit_move_insn (operands[0], lowpart_subreg (<VNARROWQ>mode, tmp,
+						 <VNARROWQ2>mode));
+    DONE;
+  }
+)
+
 (define_insn "aarch64_shrn2<mode>_insn_le"
   [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
 	(vec_concat:<VNARROWQ2>
@@ -1795,6 +1843,46 @@
   }
 )
 
+(define_insn "aarch64_rshrn2<mode>_insn_le"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+	(vec_concat:<VNARROWQ2>
+	  (match_operand:<VNARROWQ> 1 "register_operand" "0")
+	  (unspec:<VNARROWQ> [(match_operand:VQN 2 "register_operand" "w")
+			      (match_operand:VQN 3 "aarch64_simd_rshift_imm")]
+			     UNSPEC_RSHRN)))]
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "rshrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_rshrn2<mode>_insn_be"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+	(vec_concat:<VNARROWQ2>
+	  (unspec:<VNARROWQ> [(match_operand:VQN 2 "register_operand" "w")
+			      (match_operand:VQN 3 "aarch64_simd_rshift_imm")]
+			     UNSPEC_RSHRN)
+	  (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
+  "TARGET_SIMD && BYTES_BIG_ENDIAN"
+  "rshrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_rshrn2<mode>"
+  [(match_operand:<VNARROWQ2> 0 "register_operand")
+   (match_operand:<VNARROWQ> 1 "register_operand")
+   (match_operand:VQN 2 "register_operand")
+   (match_operand:SI 3 "aarch64_simd_shift_imm_offset_<ve_mode>")]
+  "TARGET_SIMD"
+  {
+    operands[3] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+						     INTVAL (operands[3]));
+    if (BYTES_BIG_ENDIAN)
+      emit_insn (gen_aarch64_rshrn2<mode>_insn_be (operands[0], operands[1],
+						   operands[2], operands[3]));
+    else
+      emit_insn (gen_aarch64_rshrn2<mode>_insn_le (operands[0], operands[1],
+						   operands[2], operands[3]));
+    DONE;
+  }
+)
 
 ;; For quads.
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 2c4066d..a482419 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -230,6 +230,7 @@
     UNSPEC_SSP_SYSREG
     UNSPEC_SP_SET
     UNSPEC_SP_TEST
+    UNSPEC_RSHRN
     UNSPEC_RSQRT
     UNSPEC_RSQRTE
     UNSPEC_RSQRTS
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 691c0c0..fa22330 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -9311,167 +9311,92 @@ vqshrun_high_n_s64 (uint32x2_t __a, int64x2_t __b, const int __c)
   return __builtin_aarch64_sqshrun2_nv2di_uuss (__a, __b, __c);
 }
 
-#define vrshrn_high_n_s16(a, b, c) \
-  __extension__ \
-    ({ \
-       int16x8_t b_ = (b); \
-       int8x8_t a_ = (a); \
-       int8x16_t result = vcombine_s8 \
-                            (a_, vcreate_s8 \
-                                   (__AARCH64_UINT64_C (0x0))); \
-       __asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
-                : "+w"(result) \
-                : "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
+{
+  return __builtin_aarch64_rshrn2v8hi (__a, __b, __c);
+}
 
-#define vrshrn_high_n_s32(a, b, c) \
-  __extension__ \
-    ({ \
-       int32x4_t b_ = (b); \
-       int16x4_t a_ = (a); \
-       int16x8_t result = vcombine_s16 \
-                            (a_, vcreate_s16 \
-                                   (__AARCH64_UINT64_C (0x0))); \
-       __asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
-                : "+w"(result) \
-                : "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_rshrn2v4si (__a, __b, __c);
+}
 
-#define vrshrn_high_n_s64(a, b, c) \
-  __extension__ \
-    ({ \
-       int64x2_t b_ = (b); \
-       int32x2_t a_ = (a); \
-       int32x4_t result = vcombine_s32 \
-                            (a_, vcreate_s32 \
-                                   (__AARCH64_UINT64_C (0x0))); \
-       __asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
-                : "+w"(result) \
-                : "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
+{
+  return __builtin_aarch64_rshrn2v2di (__a, __b, __c);
+}
 
-#define vrshrn_high_n_u16(a, b, c) \
-  __extension__ \
-    ({ \
-       uint16x8_t b_ = (b); \
-       uint8x8_t a_ = (a); \
-       uint8x16_t result = vcombine_u8 \
-                             (a_, vcreate_u8 \
-                                    (__AARCH64_UINT64_C (0x0))); \
-       __asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
-                : "+w"(result) \
-                : "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
+{
+  return (uint8x16_t) __builtin_aarch64_rshrn2v8hi ((int8x8_t) __a,
+						    (int16x8_t) __b, __c);
+}
 
-#define vrshrn_high_n_u32(a, b, c) \
-  __extension__ \
-    ({ \
-       uint32x4_t b_ = (b); \
-       uint16x4_t a_ = (a); \
-       uint16x8_t result = vcombine_u16 \
-                             (a_, vcreate_u16 \
-                                    (__AARCH64_UINT64_C (0x0))); \
-       __asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
-                : "+w"(result) \
-                : "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
+{
+  return (uint16x8_t) __builtin_aarch64_rshrn2v4si ((int16x4_t) __a,
+						    (int32x4_t) __b, __c);
+}
 
-#define vrshrn_high_n_u64(a, b, c) \
-  __extension__ \
-    ({ \
-       uint64x2_t b_ = (b); \
-       uint32x2_t a_ = (a); \
-       uint32x4_t result = vcombine_u32 \
-                             (a_, vcreate_u32 \
-                                    (__AARCH64_UINT64_C (0x0))); \
-       __asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
-                : "+w"(result) \
-                : "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
+{
+  return (uint32x4_t) __builtin_aarch64_rshrn2v2di ((int32x2_t) __a,
+						    (int64x2_t) __b, __c);
+}
 
-#define vrshrn_n_s16(a, b) \
-  __extension__ \
-    ({ \
-       int16x8_t a_ = (a); \
-       int8x8_t result; \
-       __asm__ ("rshrn %0.8b,%1.8h,%2" \
-                : "=w"(result) \
-                : "w"(a_), "i"(b) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+  return __builtin_aarch64_rshrnv8hi (__a, __b);
+}
 
-#define vrshrn_n_s32(a, b) \
-  __extension__ \
-    ({ \
-       int32x4_t a_ = (a); \
-       int16x4_t result; \
-       __asm__ ("rshrn %0.4h,%1.4s,%2" \
-                : "=w"(result) \
-                : "w"(a_), "i"(b) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+  return __builtin_aarch64_rshrnv4si (__a, __b);
+}
 
-#define vrshrn_n_s64(a, b) \
-  __extension__ \
-    ({ \
-       int64x2_t a_ = (a); \
-       int32x2_t result; \
-       __asm__ ("rshrn %0.2s,%1.2d,%2" \
-                : "=w"(result) \
-                : "w"(a_), "i"(b) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+  return __builtin_aarch64_rshrnv2di (__a, __b);
+}
 
-#define vrshrn_n_u16(a, b) \
-  __extension__ \
-    ({ \
-       uint16x8_t a_ = (a); \
-       uint8x8_t result; \
-       __asm__ ("rshrn %0.8b,%1.8h,%2" \
-                : "=w"(result) \
-                : "w"(a_), "i"(b) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint8x8_t) __builtin_aarch64_rshrnv8hi ((int16x8_t) __a, __b);
+}
 
-#define vrshrn_n_u32(a, b) \
-  __extension__ \
-    ({ \
-       uint32x4_t a_ = (a); \
-       uint16x4_t result; \
-       __asm__ ("rshrn %0.4h,%1.4s,%2" \
-                : "=w"(result) \
-                : "w"(a_), "i"(b) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint16x4_t) __builtin_aarch64_rshrnv4si ((int32x4_t) __a, __b);
+}
 
-#define vrshrn_n_u64(a, b) \
-  __extension__ \
-    ({ \
-       uint64x2_t a_ = (a); \
-       uint32x2_t result; \
-       __asm__ ("rshrn %0.2s,%1.2d,%2" \
-                : "=w"(result) \
-                : "w"(a_), "i"(b) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+  return (uint32x2_t) __builtin_aarch64_rshrnv2di ((int64x2_t) __a, __b);
+}
 
 __extension__ extern __inline uint32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
diff --git a/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c b/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
index 0fc47b5..5abcadc 100644
--- a/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
+++ b/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
@@ -111,7 +111,7 @@ ONE (vmovn_high, uint32x4_t, uint32x2_t, uint64x2_t, u64)
 /* { dg-final { scan-assembler-times "\\taddhn2\\tv" 6} } */
 /* { dg-final { scan-assembler-times "rsubhn2\\tv" 6} } */
 /* { dg-final { scan-assembler-times "raddhn2\\tv" 6} } */
-/* { dg-final { scan-assembler-times "\\trshrn2 v" 6} } */
+/* { dg-final { scan-assembler-times "\\trshrn2\\tv" 6} } */
 /* { dg-final { scan-assembler-times "\\tshrn2\\tv" 6} } */
 /* { dg-final { scan-assembler-times "sqshrun2\\tv" 3} } */
 /* { dg-final { scan-assembler-times "sqrshrun2\\tv" 3} } */
-- 
2.7.4