aarch64: Use RTL builtins for vmull[_high]_p8 intrinsics
author     Jonathan Wright <jonathan.wright@arm.com>
           Thu, 4 Feb 2021 23:00:00 +0000 (23:00 +0000)
committer  Jonathan Wright <jonathan.wright@arm.com>
           Fri, 30 Apr 2021 17:40:22 +0000 (18:40 +0100)
Rewrite vmull[_high]_p8 Neon intrinsics to use RTL builtins rather
than inline assembly code, allowing for better scheduling and
optimization.
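
For reference, a minimal usage sketch (not part of the patch; the function
names are illustrative) of the two intrinsics this change affects.  After
this commit both calls expand through the new RTL builtins, so the resulting
pmull/pmull2 instructions are visible to the scheduler and optimizers rather
than being opaque inline asm:

  #include <arm_neon.h>

  /* Illustrative only: widening polynomial multiplies over the low and
     high halves of two poly8x16_t inputs.  */
  poly16x8_t
  widen_mul_low (poly8x16_t a, poly8x16_t b)
  {
    return vmull_p8 (vget_low_p8 (a), vget_low_p8 (b));
  }

  poly16x8_t
  widen_mul_high (poly8x16_t a, poly8x16_t b)
  {
    return vmull_high_p8 (a, b);
  }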

gcc/ChangeLog:

2021-02-05  Jonathan Wright  <jonathan.wright@arm.com>

* config/aarch64/aarch64-simd-builtins.def: Add pmull[2]
builtin generator macros.
* config/aarch64/aarch64-simd.md (aarch64_pmullv8qi): Define.
(aarch64_pmull_hiv16qi_insn): Define.
(aarch64_pmull_hiv16qi): Define.
* config/aarch64/arm_neon.h (vmull_high_p8): Use RTL builtin
instead of inline asm.
(vmull_p8): Likewise.

gcc/config/aarch64/aarch64-simd-builtins.def
gcc/config/aarch64/aarch64-simd.md
gcc/config/aarch64/arm_neon.h

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 337ec8d..5d4c01f 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -46,6 +46,8 @@
   BUILTIN_VDC (COMBINE, combine, 0, AUTO_FP)
   VAR1 (COMBINEP, combine, 0, NONE, di)
   BUILTIN_VB (BINOP, pmul, 0, NONE)
+  VAR1 (BINOP, pmull, 0, NONE, v8qi)
+  VAR1 (BINOP, pmull_hi, 0, NONE, v16qi)
   BUILTIN_VHSDF_HSDF (BINOP, fmulx, 0, FP)
   BUILTIN_VHSDF_DF (UNOP, sqrt, 2, FP)
   BUILTIN_VDQ_I (BINOP, addp, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index fbfed33..65e6390 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
   [(set_attr "type" "neon_mul_<Vetype><q>")]
 )
 
+(define_insn "aarch64_pmullv8qi"
+  [(set (match_operand:V8HI 0 "register_operand" "=w")
+        (unspec:V8HI [(match_operand:V8QI 1 "register_operand" "w")
+                     (match_operand:V8QI 2 "register_operand" "w")]
+                    UNSPEC_PMULL))]
+ "TARGET_SIMD"
+ "pmull\\t%0.8h, %1.8b, %2.8b"
+  [(set_attr "type" "neon_mul_b_long")]
+)
+
+(define_insn "aarch64_pmull_hiv16qi_insn"
+  [(set (match_operand:V8HI 0 "register_operand" "=w")
+       (unspec:V8HI
+         [(vec_select:V8QI
+            (match_operand:V16QI 1 "register_operand" "w")
+            (match_operand:V16QI 3 "vect_par_cnst_hi_half" ""))
+          (vec_select:V8QI
+            (match_operand:V16QI 2 "register_operand" "w")
+            (match_dup 3))]
+         UNSPEC_PMULL))]
+ "TARGET_SIMD"
+ "pmull2\\t%0.8h, %1.16b, %2.16b"
+  [(set_attr "type" "neon_mul_b_long")]
+)
+
+(define_expand "aarch64_pmull_hiv16qi"
+  [(match_operand:V8HI 0 "register_operand")
+   (match_operand:V16QI 1 "register_operand")
+   (match_operand:V16QI 2 "register_operand")]
+ "TARGET_SIMD"
+ {
+   rtx p = aarch64_simd_vect_par_cnst_half (V16QImode, 16, true);
+   emit_insn (gen_aarch64_pmull_hiv16qi_insn (operands[0], operands[1],
+                                             operands[2], p));
+   DONE;
+ }
+)
+
 ;; fmulx.
 
 (define_insn "aarch64_fmulx<mode>"
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 4b8ec52..bde2d17 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8228,12 +8228,8 @@ __extension__ extern __inline poly16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmull_high_p8 (poly8x16_t __a, poly8x16_t __b)
 {
-  poly16x8_t __result;
-  __asm__ ("pmull2 %0.8h,%1.16b,%2.16b"
-           : "=w"(__result)
-           : "w"(__a), "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return (poly16x8_t) __builtin_aarch64_pmull_hiv16qi ((int8x16_t) __a,
+                                                      (int8x16_t) __b);
 }
 
 __extension__ extern __inline int16x8_t
@@ -8366,12 +8362,8 @@ __extension__ extern __inline poly16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmull_p8 (poly8x8_t __a, poly8x8_t __b)
 {
-  poly16x8_t __result;
-  __asm__ ("pmull %0.8h, %1.8b, %2.8b"
-           : "=w"(__result)
-           : "w"(__a), "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return (poly16x8_t) __builtin_aarch64_pmullv8qi ((int8x8_t) __a,
+                                                  (int8x8_t) __b);
 }
 
 __extension__ extern __inline int16x8_t