aarch64: Model zero-high-half semantics of XTN instruction in RTL
author     Jonathan Wright <jonathan.wright@arm.com>
Fri, 11 Jun 2021 14:48:51 +0000 (15:48 +0100)
committer  Jonathan Wright <jonathan.wright@arm.com>
Wed, 16 Jun 2021 13:21:52 +0000 (14:21 +0100)
Modeling the zero-high-half semantics of the XTN narrowing
instruction in RTL indicates to the compiler that this is a totally
destructive operation: XTN writes the full 128-bit destination
register, setting the upper 64 bits to zero. This enables more RTL
simplifications and also prevents some register allocation issues.

Add new tests to narrow_zero_high_half.c to verify the benefit of
this change.
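
As a sketch of what the new tests check (this is the expansion of the
TEST_UNARY macro added to narrow_zero_high_half.c below): combining a
narrowed value with a zero high half should now collapse to a single
XTN, since XTN itself zeroes the high half of its destination.

    #include <arm_neon.h>

    /* The vcombine of a narrowing intrinsic with vdup_n_s8 (0) matches
       the new aarch64_xtn<mode>_insn_le/_insn_be patterns, so no
       separate dup of zero is emitted - the scan-assembler directives
       in the test verify that a lone xtn suffices.  */
    int8x16_t
    test_vmovn_s16_zero_high (int16x8_t a)
    {
      return vcombine_s8 (vmovn_s16 (a), vdup_n_s8 (0));
    }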

gcc/ChangeLog:

2021-06-11  Jonathan Wright  <jonathan.wright@arm.com>

* config/aarch64/aarch64-simd.md (aarch64_xtn<mode>_insn_le):
Define - modeling zero-high-half semantics.
(aarch64_xtn<mode>): Change to an expander that emits the
appropriate instruction depending on endianness.
(aarch64_xtn<mode>_insn_be): Define - modeling zero-high-half
semantics.
(aarch64_xtn2<mode>_le): Rename to...
(aarch64_xtn2<mode>_insn_le): This.
(aarch64_xtn2<mode>_be): Rename to...
(aarch64_xtn2<mode>_insn_be): This.
(vec_pack_trunc_<mode>): Emit truncation instruction instead
of aarch64_xtn.
* config/aarch64/iterators.md (Vnarrowd): New mode attribute.

gcc/testsuite/ChangeLog:

2021-06-11  Jonathan Wright  <jonathan.wright@arm.com>

* gcc.target/aarch64/narrow_zero_high_half.c: Add new tests.

gcc/config/aarch64/aarch64-simd.md
gcc/config/aarch64/iterators.md
gcc/testsuite/gcc.target/aarch64/narrow_zero_high_half.c

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index e750fae..b23556b 100644
 
 ;; Narrowing operations.
 
-;; For doubles.
+(define_insn "aarch64_xtn<mode>_insn_le"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+       (vec_concat:<VNARROWQ2>
+         (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w"))
+         (match_operand:<VNARROWQ> 2 "aarch64_simd_or_scalar_imm_zero")))]
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "xtn\\t%0.<Vntype>, %1.<Vtype>"
+  [(set_attr "type" "neon_move_narrow_q")]
+)
 
-(define_insn "aarch64_xtn<mode>"
-  [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
-       (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")))]
-  "TARGET_SIMD"
+(define_insn "aarch64_xtn<mode>_insn_be"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+       (vec_concat:<VNARROWQ2>
+         (match_operand:<VNARROWQ> 2 "aarch64_simd_or_scalar_imm_zero")
+         (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w"))))]
+  "TARGET_SIMD && BYTES_BIG_ENDIAN"
   "xtn\\t%0.<Vntype>, %1.<Vtype>"
   [(set_attr "type" "neon_move_narrow_q")]
 )
 
-(define_insn "aarch64_xtn2<mode>_le"
+(define_expand "aarch64_xtn<mode>"
+  [(set (match_operand:<VNARROWQ> 0 "register_operand")
+       (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand")))]
+  "TARGET_SIMD"
+  {
+    rtx tmp = gen_reg_rtx (<VNARROWQ2>mode);
+    if (BYTES_BIG_ENDIAN)
+      emit_insn (gen_aarch64_xtn<mode>_insn_be (tmp, operands[1],
+                               CONST0_RTX (<VNARROWQ>mode)));
+    else
+      emit_insn (gen_aarch64_xtn<mode>_insn_le (tmp, operands[1],
+                               CONST0_RTX (<VNARROWQ>mode)));
+
+    /* The intrinsic expects a narrow result, so emit a subreg that will get
+       optimized away as appropriate.  */
+    emit_move_insn (operands[0], lowpart_subreg (<VNARROWQ>mode, tmp,
+                                                <VNARROWQ2>mode));
+    DONE;
+  }
+)
+
+(define_insn "aarch64_xtn2<mode>_insn_le"
   [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
        (vec_concat:<VNARROWQ2>
          (match_operand:<VNARROWQ> 1 "register_operand" "0")
   [(set_attr "type" "neon_move_narrow_q")]
 )
 
-(define_insn "aarch64_xtn2<mode>_be"
+(define_insn "aarch64_xtn2<mode>_insn_be"
   [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
        (vec_concat:<VNARROWQ2>
          (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))
   "TARGET_SIMD"
   {
     if (BYTES_BIG_ENDIAN)
-      emit_insn (gen_aarch64_xtn2<mode>_be (operands[0], operands[1],
-                                            operands[2]));
+      emit_insn (gen_aarch64_xtn2<mode>_insn_be (operands[0], operands[1],
+                                                operands[2]));
     else
-      emit_insn (gen_aarch64_xtn2<mode>_le (operands[0], operands[1],
-                                            operands[2]));
+      emit_insn (gen_aarch64_xtn2<mode>_insn_le (operands[0], operands[1],
+                                                operands[2]));
     DONE;
   }
 )
 
+;; Packing doubles.
+
 (define_expand "vec_pack_trunc_<mode>"
  [(match_operand:<VNARROWD> 0 "register_operand")
   (match_operand:VDN 1 "register_operand")
 
   emit_insn (gen_move_lo_quad_<Vdbl> (tempreg, operands[lo]));
   emit_insn (gen_move_hi_quad_<Vdbl> (tempreg, operands[hi]));
-  emit_insn (gen_aarch64_xtn<Vdbl> (operands[0], tempreg));
+  emit_insn (gen_trunc<Vdbl><Vnarrowd>2 (operands[0], tempreg));
   DONE;
 })
 
+;; Packing quads.
+
+(define_expand "vec_pack_trunc_<mode>"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand")
+       (vec_concat:<VNARROWQ2>
+        (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand"))
+        (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand"))))]
+ "TARGET_SIMD"
+ {
+   rtx tmpreg = gen_reg_rtx (<VNARROWQ>mode);
+   int lo = BYTES_BIG_ENDIAN ? 2 : 1;
+   int hi = BYTES_BIG_ENDIAN ? 1 : 2;
+
+   emit_insn (gen_trunc<mode><Vnarrowq>2 (tmpreg, operands[lo]));
+
+   if (BYTES_BIG_ENDIAN)
+     emit_insn (gen_aarch64_xtn2<mode>_insn_be (operands[0], tmpreg,
+                                               operands[hi]));
+   else
+     emit_insn (gen_aarch64_xtn2<mode>_insn_le (operands[0], tmpreg,
+                                               operands[hi]));
+   DONE;
+ }
+)
+
 (define_insn "aarch64_shrn<mode>_insn_le"
   [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
        (vec_concat:<VNARROWQ2>
   }
 )
 
-;; For quads.
-
-(define_expand "vec_pack_trunc_<mode>"
- [(set (match_operand:<VNARROWQ2> 0 "register_operand")
-       (vec_concat:<VNARROWQ2>
-        (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand"))
-        (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand"))))]
- "TARGET_SIMD"
- {
-   rtx tmpreg = gen_reg_rtx (<VNARROWQ>mode);
-   int lo = BYTES_BIG_ENDIAN ? 2 : 1;
-   int hi = BYTES_BIG_ENDIAN ? 1 : 2;
-
-   emit_insn (gen_aarch64_xtn<mode> (tmpreg, operands[lo]));
-
-   if (BYTES_BIG_ENDIAN)
-     emit_insn (gen_aarch64_xtn2<mode>_be (operands[0], tmpreg, operands[hi]));
-   else
-     emit_insn (gen_aarch64_xtn2<mode>_le (operands[0], tmpreg, operands[hi]));
-   DONE;
- }
-)
-
 ;; Widening operations.
 
 (define_insn "aarch64_simd_vec_unpack<su>_lo_<mode>"
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index e9047d0..caa42f8 100644
 ;; Narrowed modes for VDN.
 (define_mode_attr VNARROWD [(V4HI "V8QI") (V2SI "V4HI")
                            (DI   "V2SI")])
+(define_mode_attr Vnarrowd [(V4HI "v8qi") (V2SI "v4hi")
+                           (DI   "v2si")])
 
 ;; Narrowed double-modes for VQN (Used for XTN).
 (define_mode_attr VNARROWQ [(V8HI "V8QI") (V4SI "V4HI")
diff --git a/gcc/testsuite/gcc.target/aarch64/narrow_zero_high_half.c b/gcc/testsuite/gcc.target/aarch64/narrow_zero_high_half.c
index a79a4c3..451b011 100644
@@ -48,6 +48,21 @@ TEST_SHIFT (vqrshrun_n, uint8x16_t, int16x8_t, s16, u8)
 TEST_SHIFT (vqrshrun_n, uint16x8_t, int32x4_t, s32, u16)
 TEST_SHIFT (vqrshrun_n, uint32x4_t, int64x2_t, s64, u32)
 
+#define TEST_UNARY(name, rettype, intype, fs, rs) \
+  rettype test_ ## name ## _ ## fs ## _zero_high \
+               (intype a) \
+       { \
+               return vcombine_ ## rs (name ## _ ## fs (a), \
+                                       vdup_n_ ## rs (0)); \
+       }
+
+TEST_UNARY (vmovn, int8x16_t, int16x8_t, s16, s8)
+TEST_UNARY (vmovn, int16x8_t, int32x4_t, s32, s16)
+TEST_UNARY (vmovn, int32x4_t, int64x2_t, s64, s32)
+TEST_UNARY (vmovn, uint8x16_t, uint16x8_t, u16, u8)
+TEST_UNARY (vmovn, uint16x8_t, uint32x4_t, u32, u16)
+TEST_UNARY (vmovn, uint32x4_t, uint64x2_t, u64, u32)
+
 /* { dg-final { scan-assembler-not "dup\\t" } } */
 
 /* { dg-final { scan-assembler-times "\\tshrn\\tv" 6} }  */
@@ -58,3 +73,4 @@ TEST_SHIFT (vqrshrun_n, uint32x4_t, int64x2_t, s64, u32)
 /* { dg-final { scan-assembler-times "\\tuqrshrn\\tv" 3} }  */
 /* { dg-final { scan-assembler-times "\\tsqshrun\\tv" 3} }  */
 /* { dg-final { scan-assembler-times "\\tsqrshrun\\tv" 3} }  */
+/* { dg-final { scan-assembler-times "\\txtn\\tv" 6} }  */
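
For illustration, the retargeted vec_pack_trunc_<mode> expanders also
matter outside the intrinsics: auto-vectorized narrowing conversions
expand through vec_pack_trunc and so now reach the zero-high-half-aware
xtn/xtn2 patterns. A hypothetical example (not part of this commit):

    #include <stdint.h>

    /* When vectorized, the int16_t -> int8_t truncation goes through
       the vec_pack_trunc_<mode> expander, which emits the renamed
       aarch64_xtn2<mode>_insn_le/_insn_be patterns defined above.  */
    void
    narrow_array (int8_t *restrict dst, const int16_t *restrict src, int n)
    {
      for (int i = 0; i < n; i++)
        dst[i] = (int8_t) src[i];
    }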