aarch64: Reimplement vshrn_n* intrinsics using builtins
author    Kyrylo Tkachov <kyrylo.tkachov@arm.com>
          Fri, 22 Jan 2021 14:16:30 +0000 (14:16 +0000)
committer Kyrylo Tkachov <kyrylo.tkachov@arm.com>
          Thu, 28 Jan 2021 11:42:20 +0000 (11:42 +0000)
This patch reimplements the vshrn_n* intrinsics to use RTL builtins.
These perform a narrowing right shift.

Although the intrinsic returns the half-width mode (e.g. V8HI ->
V8QI), the new pattern generates a full 128-bit mode (V8HI -> V16QI),
representing the fill-with-zeroes semantics of the SHRN instruction.
The narrower (V8QI) result is then extracted with a lowpart subreg.
I found this allows the RTL optimisers to do a better job of optimising
away redundant moves in frequently-occurring SHRN+SHRN2 pairs, like in:
uint8x16_t
foo (uint16x8_t in1, uint16x8_t in2)
{
  uint8x8_t tmp = vshrn_n_u16 (in2, 7);
  uint8x16_t tmp2 = vshrn_high_n_u16 (tmp, in1, 4);
  return tmp2;
}
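
For reference, the operation the builtins implement is a plain
truncating shift per lane: shift each element right, then keep the low
half of each element.  A scalar model of vshrn_n_u16 (illustrative
only, not part of the patch):

#include <stdint.h>

/* Reference model of vshrn_n_u16: shift each 16-bit lane right by n
   (n must be in [1, 8]), then truncate to the low 8 bits.  */
static void
shrn_u16_model (uint8_t out[8], const uint16_t in[8], int n)
{
  for (int i = 0; i < 8; i++)
    out[i] = (uint8_t) (in[i] >> n);
}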

gcc/ChangeLog:

* config/aarch64/aarch64-simd-builtins.def (shrn): Define
builtin.
* config/aarch64/aarch64-simd.md (aarch64_shrn<mode>_insn_le):
Define.
(aarch64_shrn<mode>_insn_be): Likewise.
(aarch64_shrn<mode>): Likewise.
* config/aarch64/arm_neon.h (vshrn_n_s16): Reimplement using
builtins.
(vshrn_n_s32): Likewise.
(vshrn_n_s64): Likewise.
(vshrn_n_u16): Likewise.
(vshrn_n_u32): Likewise.
(vshrn_n_u64): Likewise.
* config/aarch64/iterators.md (vn_mode): New mode attribute.

gcc/config/aarch64/aarch64-simd-builtins.def
gcc/config/aarch64/aarch64-simd.md
gcc/config/aarch64/arm_neon.h
gcc/config/aarch64/iterators.md

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index a71ae4d..13bc692 100644
   /* Implemented by aarch64_mls_n<mode>.  */
   BUILTIN_VDQHS (TERNOP, mls_n, 0, NONE)
 
+  /* Implemented by aarch64_shrn<mode>.  */
+  BUILTIN_VQN (SHIFTIMM, shrn, 0, NONE)
+
   /* Implemented by aarch64_<su>mlsl<mode>.  */
   BUILTIN_VD_BHSI (TERNOP, smlsl, 0, NONE)
   BUILTIN_VD_BHSI (TERNOPU, umlsl, 0, NONE)
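
The VQN mode iterator covers V8HI, V4SI and V2DI, so the single
BUILTIN_VQN line above declares one builtin per mode.  Their effective
C-level signatures, as used from arm_neon.h further down, are along
these lines (a sketch; the real prototypes are generated internally by
the builtin machinery):

int8x8_t  __builtin_aarch64_shrnv8hi (int16x8_t, int);
int16x4_t __builtin_aarch64_shrnv4si (int32x4_t, int);
int32x2_t __builtin_aarch64_shrnv2di (int64x2_t, int);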
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index db56b61..872aa83 100644
   DONE;
 })
 
+(define_insn "aarch64_shrn<mode>_insn_le"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+       (vec_concat:<VNARROWQ2>
+         (truncate:<VNARROWQ>
+           (lshiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
+                 (match_operand:VQN 2 "aarch64_simd_rshift_imm")))
+         (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")))]
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "shrn\\t%0.<Vntype>, %1.<Vtype>, %2"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_shrn<mode>_insn_be"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+       (vec_concat:<VNARROWQ2>
+         (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")
+         (truncate:<VNARROWQ>
+           (lshiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
+                 (match_operand:VQN 2 "aarch64_simd_rshift_imm")))))]
+  "TARGET_SIMD && BYTES_BIG_ENDIAN"
+  "shrn\\t%0.<Vntype>, %1.<Vtype>, %2"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_shrn<mode>"
+  [(set (match_operand:<VNARROWQ> 0 "register_operand")
+       (truncate:<VNARROWQ>
+         (lshiftrt:VQN (match_operand:VQN 1 "register_operand")
+           (match_operand:SI 2 "aarch64_simd_shift_imm_offset_<vn_mode>"))))]
+  "TARGET_SIMD"
+  {
+    operands[2] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+                                                INTVAL (operands[2]));
+    rtx tmp = gen_reg_rtx (<VNARROWQ2>mode);
+    if (BYTES_BIG_ENDIAN)
+      emit_insn (gen_aarch64_shrn<mode>_insn_be (tmp, operands[1],
+                               operands[2], CONST0_RTX (<VNARROWQ>mode)));
+    else
+      emit_insn (gen_aarch64_shrn<mode>_insn_le (tmp, operands[1],
+                               operands[2], CONST0_RTX (<VNARROWQ>mode)));
+
+    /* The intrinsic expects a narrow result, so emit a subreg that will get
+       optimized away as appropriate.  */
+    emit_move_insn (operands[0], lowpart_subreg (<VNARROWQ>mode, tmp,
+                                                <VNARROWQ2>mode));
+    DONE;
+  }
+)
+
+
 ;; For quads.
 
 (define_insn "vec_pack_trunc_<mode>"
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index b5c1f06..80d7555 100644
@@ -8584,6 +8584,47 @@ vmovn_u64 (uint64x2_t __a)
   return (uint32x2_t) __builtin_aarch64_xtnv2di ((int64x2_t) __a);
 }
 
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_s16 (int16x8_t __a, const int __b)
+{
+  return __builtin_aarch64_shrnv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_s32 (int32x4_t __a, const int __b)
+{
+  return __builtin_aarch64_shrnv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_s64 (int64x2_t __a, const int __b)
+{
+  return __builtin_aarch64_shrnv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint8x8_t)__builtin_aarch64_shrnv8hi ((int16x8_t)__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint16x4_t)__builtin_aarch64_shrnv4si ((int32x4_t)__a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+  return (uint32x2_t)__builtin_aarch64_shrnv2di ((int64x2_t)__a, __b);
+}
 #define vmull_high_lane_s16(a, b, c)                                    \
   __extension__                                                         \
     ({                                                                  \
@@ -9858,78 +9899,6 @@ vrsqrteq_u32 (uint32x4_t __a)
        result;                                                          \
      })
 
-#define vshrn_n_s16(a, b)                                               \
-  __extension__                                                         \
-    ({                                                                  \
-       int16x8_t a_ = (a);                                              \
-       int8x8_t result;                                                 \
-       __asm__ ("shrn %0.8b,%1.8h,%2"                                   \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vshrn_n_s32(a, b)                                               \
-  __extension__                                                         \
-    ({                                                                  \
-       int32x4_t a_ = (a);                                              \
-       int16x4_t result;                                                \
-       __asm__ ("shrn %0.4h,%1.4s,%2"                                   \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vshrn_n_s64(a, b)                                               \
-  __extension__                                                         \
-    ({                                                                  \
-       int64x2_t a_ = (a);                                              \
-       int32x2_t result;                                                \
-       __asm__ ("shrn %0.2s,%1.2d,%2"                                   \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vshrn_n_u16(a, b)                                               \
-  __extension__                                                         \
-    ({                                                                  \
-       uint16x8_t a_ = (a);                                             \
-       uint8x8_t result;                                                \
-       __asm__ ("shrn %0.8b,%1.8h,%2"                                   \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vshrn_n_u32(a, b)                                               \
-  __extension__                                                         \
-    ({                                                                  \
-       uint32x4_t a_ = (a);                                             \
-       uint16x4_t result;                                               \
-       __asm__ ("shrn %0.4h,%1.4s,%2"                                   \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vshrn_n_u64(a, b)                                               \
-  __extension__                                                         \
-    ({                                                                  \
-       uint64x2_t a_ = (a);                                             \
-       uint32x2_t result;                                               \
-       __asm__ ("shrn %0.2s,%1.2d,%2"                                   \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
 #define vsli_n_p8(a, b, c)                                              \
   __extension__                                                         \
     ({                                                                  \
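
Note that the unsigned vshrn_n_u* variants above are just casts around
the one signed builtin per mode.  This is safe because the operation is
a truncating right shift: for shift amounts of 1 up to the narrow
element width, no fill bits ever reach the retained low half, so signed
and unsigned inputs produce identical result bits.  A hypothetical
equivalent spelling using only the public intrinsics:

#include <arm_neon.h>

/* Bit-identical to vshrn_n_u16 (x, 5): reinterpret, narrow-shift as
   signed, reinterpret back.  */
uint8x8_t
narrow_u16_via_s16 (uint16x8_t x)
{
  return vreinterpret_u8_s8 (vshrn_n_s16 (vreinterpretq_s16_u16 (x), 5));
}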
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index b64d770..7db343e 100644
                           (QI   "qi") (HI    "hi")
                           (SI   "si")])
 
+;; Like ve_mode but for the half-width modes.
+(define_mode_attr vn_mode [(V8HI  "qi") (V4SI  "hi") (V2DI  "si")])
+
 ;; Vm for lane instructions is restricted to FP_LO_REGS.
 (define_mode_attr vwx [(V4HI "x") (V8HI "x") (HI "x")
                       (V2SI "w") (V4SI "w") (SI "w")])