[AArch64] Add truncation for partial SVE modes
author     Richard Sandiford <richard.sandiford@arm.com>
           Sat, 16 Nov 2019 11:14:51 +0000 (11:14 +0000)
committer  Richard Sandiford <rsandifo@gcc.gnu.org>
           Sat, 16 Nov 2019 11:14:51 +0000 (11:14 +0000)
This patch adds support for "truncating" to a partial SVE vector from
either a full SVE vector or a wider partial vector.  This truncation is
actually a no-op and so should have zero cost in the vector cost model.
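
For illustration (not part of the patch, and modelled on the new
truncate_1.c test below), the loop in the following sketch computes in
uint32_t but stores uint8_t.  When the vectorizer keeps the arithmetic
in 32-bit elements, the final narrowing is a truncation from a full SVE
vector to a partial one, which this patch makes free:

  #include <stdint.h>

  /* Sketch only: the 32-bit result already holds the 8-bit value in
     its low bits, so truncating to the partial destination vector
     needs no instruction.  */
  void
  f (uint8_t *restrict dst, uint32_t *restrict src, int n)
  {
    for (int i = 0; i < n; ++i)
      dst[i] = src[i] >> 2;
  }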

2019-11-16  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
* config/aarch64/aarch64-sve.md
(trunc<SVE_HSDI:mode><SVE_PARTIAL_I:mode>2): New pattern.
* config/aarch64/aarch64.c (aarch64_integer_truncation_p): New
function.
(aarch64_sve_adjust_stmt_cost): Call it.

gcc/testsuite/
* gcc.target/aarch64/sve/mask_struct_load_1.c: Add
--param aarch64-sve-compare-costs=0.
* gcc.target/aarch64/sve/mask_struct_load_2.c: Likewise.
* gcc.target/aarch64/sve/mask_struct_load_3.c: Likewise.
* gcc.target/aarch64/sve/mask_struct_load_4.c: Likewise.
* gcc.target/aarch64/sve/mask_struct_load_5.c: Likewise.
* gcc.target/aarch64/sve/pack_1.c: Likewise.
* gcc.target/aarch64/sve/truncate_1.c: New test.

From-SVN: r278344

gcc/ChangeLog
gcc/config/aarch64/aarch64-sve.md
gcc/config/aarch64/aarch64.c
gcc/testsuite/ChangeLog
gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_1.c
gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_2.c
gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_3.c
gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_4.c
gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_5.c
gcc/testsuite/gcc.target/aarch64/sve/pack_1.c
gcc/testsuite/gcc.target/aarch64/sve/truncate_1.c [new file with mode: 0644]

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 83931c5..7fe9a11 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,6 +1,14 @@
 2019-11-16  Richard Sandiford  <richard.sandiford@arm.com>
 
        * config/aarch64/aarch64-sve.md
+       (trunc<SVE_HSDI:mode><SVE_PARTIAL_I:mode>2): New pattern.
+       * config/aarch64/aarch64.c (aarch64_integer_truncation_p): New
+       function.
+       (aarch64_sve_adjust_stmt_cost): Call it.
+
+2019-11-16  Richard Sandiford  <richard.sandiford@arm.com>
+
+       * config/aarch64/aarch64-sve.md
        (@aarch64_load_<ANY_EXTEND:optab><VNx8_WIDE:mode><VNx8_NARROW:mode>):
        (@aarch64_load_<ANY_EXTEND:optab><VNx4_WIDE:mode><VNx4_NARROW:mode>)
        (@aarch64_load_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>):
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index ce1bd58..158a178 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -72,6 +72,7 @@
 ;; ---- [INT] General unary arithmetic corresponding to rtx codes
 ;; ---- [INT] General unary arithmetic corresponding to unspecs
 ;; ---- [INT] Sign and zero extension
+;; ---- [INT] Truncation
 ;; ---- [INT] Logical inverse
 ;; ---- [FP<-INT] General unary arithmetic that maps to unspecs
 ;; ---- [FP] General unary arithmetic corresponding to unspecs
 )
 
 ;; -------------------------------------------------------------------------
+;; ---- [INT] Truncation
+;; -------------------------------------------------------------------------
+;; The patterns in this section are synthetic.
+;; -------------------------------------------------------------------------
+
+;; Truncate to a partial SVE vector from either a full vector or a
+;; wider partial vector.  This is a no-op, because we can just ignore
+;; the unused upper bits of the source.
+(define_insn_and_split "trunc<SVE_HSDI:mode><SVE_PARTIAL_I:mode>2"
+  [(set (match_operand:SVE_PARTIAL_I 0 "register_operand" "=w")
+       (truncate:SVE_PARTIAL_I
+         (match_operand:SVE_HSDI 1 "register_operand" "w")))]
+  "TARGET_SVE && (~<SVE_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
+  "#"
+  "&& reload_completed"
+  [(set (match_dup 0) (match_dup 1))]
+  {
+    operands[1] = aarch64_replace_reg_mode (operands[1],
+                                           <SVE_PARTIAL_I:MODE>mode);
+  }
+)
+
+;; -------------------------------------------------------------------------
 ;; ---- [INT] Logical inverse
 ;; -------------------------------------------------------------------------
 ;; Includes:
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 305c6da..f710aa2 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -12901,6 +12901,21 @@ aarch64_extending_load_p (stmt_vec_info stmt_info)
          && DR_IS_READ (STMT_VINFO_DATA_REF (def_stmt_info)));
 }
 
+/* Return true if STMT_INFO is an integer truncation.  */
+static bool
+aarch64_integer_truncation_p (stmt_vec_info stmt_info)
+{
+  gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
+  if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
+    return false;
+
+  tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign));
+  tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
+  return (INTEGRAL_TYPE_P (lhs_type)
+         && INTEGRAL_TYPE_P (rhs_type)
+         && TYPE_PRECISION (lhs_type) < TYPE_PRECISION (rhs_type));
+}
+
 /* STMT_COST is the cost calculated by aarch64_builtin_vectorization_cost
    for STMT_INFO, which has cost kind KIND.  Adjust the cost as necessary
    for SVE targets.  */
@@ -12919,6 +12934,11 @@ aarch64_sve_adjust_stmt_cost (vect_cost_for_stmt kind, stmt_vec_info stmt_info,
   if (kind == vector_stmt && aarch64_extending_load_p (stmt_info))
     stmt_cost = 0;
 
+  /* For similar reasons, vector_stmt integer truncations are a no-op,
+     because we can just ignore the unused upper bits of the source.  */
+  if (kind == vector_stmt && aarch64_integer_truncation_p (stmt_info))
+    stmt_cost = 0;
+
   return stmt_cost;
 }
 
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index e13aa9f..28f99e0 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,5 +1,16 @@
 2019-11-16  Richard Sandiford  <richard.sandiford@arm.com>
 
+       * gcc.target/aarch64/sve/mask_struct_load_1.c: Add
+       --param aarch64-sve-compare-costs=0.
+       * gcc.target/aarch64/sve/mask_struct_load_2.c: Likewise.
+       * gcc.target/aarch64/sve/mask_struct_load_3.c: Likewise.
+       * gcc.target/aarch64/sve/mask_struct_load_4.c: Likewise.
+       * gcc.target/aarch64/sve/mask_struct_load_5.c: Likewise.
+       * gcc.target/aarch64/sve/pack_1.c: Likewise.
+       * gcc.target/aarch64/sve/truncate_1.c: New test.
+
+2019-11-16  Richard Sandiford  <richard.sandiford@arm.com>
+
        * gcc.target/aarch64/sve/load_extend_1.c: New test.
        * gcc.target/aarch64/sve/load_extend_2.c: Likewise.
        * gcc.target/aarch64/sve/load_extend_3.c: Likewise.
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_1.c b/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_1.c
index d450332..03b2b93 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_1.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -ffast-math" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math --param aarch64-sve-compare-costs=0" } */
 
 #include <stdint.h>
 
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_2.c b/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_2.c
index 812b967..87ac317 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_2.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -ffast-math" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math --param aarch64-sve-compare-costs=0" } */
 
 #include <stdint.h>
 
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_3.c b/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_3.c
index 29702ab..54806f9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_3.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -ffast-math" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math --param aarch64-sve-compare-costs=0" } */
 
 #include <stdint.h>
 
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_4.c b/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_4.c
index 436da08..4c73004 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_4.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -ffast-math" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math --param aarch64-sve-compare-costs=0" } */
 
 #include <stdint.h>
 
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_5.c b/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_5.c
index 08e5824..da367e4 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/mask_struct_load_5.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -ffast-math" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math --param aarch64-sve-compare-costs=0" } */
 
 #include <stdint.h>
 
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pack_1.c b/gcc/testsuite/gcc.target/aarch64/sve/pack_1.c
index d9de996..7c7bfdd 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pack_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pack_1.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize" } */
+/* { dg-options "-O2 -ftree-vectorize --param aarch64-sve-compare-costs=0" } */
 
 #include <stdint.h>
 
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/truncate_1.c b/gcc/testsuite/gcc.target/aarch64/sve/truncate_1.c
new file mode 100644
index 0000000..aeb0645
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/truncate_1.c
@@ -0,0 +1,44 @@
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include <stdint.h>
+
+#define TEST_LOOP(TYPE1, TYPE2, SHIFT)                                 \
+  void                                                                 \
+  f_##TYPE1##_##TYPE2 (TYPE2 *restrict dst, TYPE1 *restrict src1,      \
+                      TYPE1 *restrict src2, int n)                     \
+  {                                                                    \
+    for (int i = 0; i < n; ++i)                                                \
+      dst[i] = (TYPE1) (src1[i] + src2[i]) >> SHIFT;                   \
+  }
+
+#define TEST_ALL(T) \
+  T (uint16_t, uint8_t, 2) \
+  T (uint32_t, uint8_t, 18) \
+  T (uint64_t, uint8_t, 34) \
+  T (uint32_t, uint16_t, 3) \
+  T (uint64_t, uint16_t, 19) \
+  T (uint64_t, uint32_t, 4)
+
+TEST_ALL (TEST_LOOP)
+
+/* { dg-final { scan-assembler-times {\tld1h\tz[0-9]+\.h,} 2 } } */
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s,} 4 } } */
+/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d,} 6 } } */
+
+/* { dg-final { scan-assembler-times {\tadd\tz[0-9]+\.h, z[0-9]+\.h, z[0-9]+\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tadd\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tadd\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 3 } } */
+
+/* { dg-final { scan-assembler-times {\tlsr\tz[0-9]+\.h, z[0-9]+\.h, #2\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tlsr\tz[0-9]+\.s, z[0-9]+\.s, #18\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tlsr\tz[0-9]+\.d, z[0-9]+\.d, #34\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tlsr\tz[0-9]+\.s, z[0-9]+\.s, #3\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tlsr\tz[0-9]+\.d, z[0-9]+\.d, #19\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tlsr\tz[0-9]+\.d, z[0-9]+\.d, #4\n} 1 } } */
+
+/* { dg-final { scan-assembler-times {\tst1b\tz[0-9]+\.h,} 1 } } */
+/* { dg-final { scan-assembler-times {\tst1b\tz[0-9]+\.s,} 1 } } */
+/* { dg-final { scan-assembler-times {\tst1b\tz[0-9]+\.d,} 1 } } */
+/* { dg-final { scan-assembler-times {\tst1h\tz[0-9]+\.s,} 1 } } */
+/* { dg-final { scan-assembler-times {\tst1h\tz[0-9]+\.d,} 1 } } */
+/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.d,} 1 } } */
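
For reference (an illustrative hand expansion, not part of the patch),
instantiating the test's TEST_LOOP macro for (uint64_t, uint32_t, 4)
gives the function below.  The addition and shift operate on .d
vectors; the uint64_t -> uint32_t truncation is the no-op covered by
the new pattern, and st1w then stores the 32-bit results from their
64-bit containers:

  #include <stdint.h>

  /* Hand expansion of TEST_LOOP (uint64_t, uint32_t, 4).  */
  void
  f_uint64_t_uint32_t (uint32_t *restrict dst, uint64_t *restrict src1,
                       uint64_t *restrict src2, int n)
  {
    for (int i = 0; i < n; ++i)
      dst[i] = (uint64_t) (src1[i] + src2[i]) >> 4;
  }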