Require equal shift amounts for IFN_DIV_POW2
author Richard Sandiford <richard.sandiford@arm.com>
Mon, 6 Jan 2020 18:00:15 +0000 (18:00 +0000)
committer Richard Sandiford <rsandifo@gcc.gnu.org>
Mon, 6 Jan 2020 18:00:15 +0000 (18:00 +0000)
IFN_DIV_POW2 currently requires all elements to be shifted by the
same amount, in a similar way as for WIDEN_LSHIFT_EXPR.  This patch
enforces that when building the SLP tree.

If in future targets want to support IFN_DIV_POW2 without this
restriction, we'll probably need the kind of vector-vector/
vector-scalar split that we already have for normal shifts.

2020-01-06  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
* tree-vect-slp.c (vect_build_slp_tree_1): Require all shifts
in an IFN_DIV_POW2 node to be equal.

gcc/testsuite/
* gcc.target/aarch64/sve/asrdiv_1.c: Remove trailing %s.
* gcc.target/aarch64/sve/asrdiv_2.c: New test.
* gcc.target/aarch64/sve/asrdiv_3.c: Likewise.

From-SVN: r279908

gcc/ChangeLog
gcc/testsuite/ChangeLog
gcc/testsuite/gcc.target/aarch64/sve/asrdiv_1.c
gcc/testsuite/gcc.target/aarch64/sve/asrdiv_2.c [new file with mode: 0644]
gcc/testsuite/gcc.target/aarch64/sve/asrdiv_3.c [new file with mode: 0644]
gcc/tree-vect-slp.c

index d72076f..80ace59 100644 (file)
@@ -1,5 +1,10 @@
 2020-01-06  Richard Sandiford  <richard.sandiford@arm.com>
 
+       * tree-vect-slp.c (vect_build_slp_tree_1): Require all shifts
+       in an IFN_DIV_POW2 node to be equal.
+
+2020-01-06  Richard Sandiford  <richard.sandiford@arm.com>
+
        * tree-vect-stmts.c (vect_check_load_store_mask): Rename to...
        (vect_check_scalar_mask): ...this.
        (vectorizable_store, vectorizable_load): Update call accordingly.
index 3a6f6c6..3e778e9 100644 (file)
@@ -1,5 +1,11 @@
 2020-01-06  Richard Sandiford  <richard.sandiford@arm.com>
 
+       * gcc.target/aarch64/sve/asrdiv_1.c: Remove trailing %s.
+       * gcc.target/aarch64/sve/asrdiv_2.c: New test.
+       * gcc.target/aarch64/sve/asrdiv_3.c: Likewise.
+
+2020-01-06  Richard Sandiford  <richard.sandiford@arm.com>
+
        * gcc.dg/vect/vect-cond-arith-8.c: New test.
        * gcc.target/aarch64/sve/cond_fmul_5.c: Likewise.
 
index 615d8b8..16638af 100644 (file)
@@ -45,7 +45,7 @@ DIVMOD (64);
 /* { dg-final { scan-assembler-times {\tlsl\tz[0-9]+\.d, z[0-9]+\.d, #33\n} 1 } } */
 /* { dg-final { scan-assembler-times {\tsub\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 1 } } */
 
-/* { dg-final { scan-assembler-not {\tasr\t%} } } */
-/* { dg-final { scan-assembler-not {\tlsr\t%} } } */
-/* { dg-final { scan-assembler-not {\tcmplt\t%} } } */
-/* { dg-final { scan-assembler-not {\tand\t%} } } */
+/* { dg-final { scan-assembler-not {\tasr\t} } } */
+/* { dg-final { scan-assembler-not {\tlsr\t} } } */
+/* { dg-final { scan-assembler-not {\tcmplt\t} } } */
+/* { dg-final { scan-assembler-not {\tand\t} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_2.c b/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_2.c
new file mode 100644 (file)
index 0000000..73f51df
--- /dev/null
@@ -0,0 +1,19 @@
+/* { dg-options "-O2 -ftree-vectorize -msve-vector-bits=256" } */
+/* Originally from gcc.dg/vect/pr51583-3.c.  */
+
+int a[8], b[8];
+
+void
+f3 (void)
+{
+  a[0] = b[0] / 8;
+  a[1] = b[1] / 4;
+  a[2] = b[2] / 8;
+  a[3] = b[3] / 4;
+  a[4] = b[4] / 8;
+  a[5] = b[5] / 4;
+  a[6] = b[6] / 8;
+  a[7] = b[7] / 4;
+}
+
+/* { dg-final { scan-assembler-not {\tasrd\t} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_3.c b/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_3.c
new file mode 100644 (file)
index 0000000..f340d51
--- /dev/null
@@ -0,0 +1,19 @@
+/* { dg-options "-O2 -ftree-vectorize -msve-vector-bits=256" } */
+/* Originally from gcc.dg/vect/pr51583-3.c.  */
+
+int a[8], b[8];
+
+void
+f3 (void)
+{
+  a[0] = b[0] / 8;
+  a[1] = b[1] / 8;
+  a[2] = b[2] / 8;
+  a[3] = b[3] / 8;
+  a[4] = b[4] / 8;
+  a[5] = b[5] / 8;
+  a[6] = b[6] / 8;
+  a[7] = b[7] / 8;
+}
+
+/* { dg-final { scan-assembler-times {\tasrd\t} 1 } } */
index e9bd884..9cb724b 100644 (file)
@@ -885,7 +885,8 @@ vect_build_slp_tree_1 (unsigned char *swap,
          && !vect_update_shared_vectype (stmt_info, vectype))
        continue;
 
-      if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
+      gcall *call_stmt = dyn_cast <gcall *> (stmt);
+      if (call_stmt)
        {
          rhs_code = CALL_EXPR;
 
@@ -971,6 +972,12 @@ vect_build_slp_tree_1 (unsigned char *swap,
               need_same_oprnds = true;
               first_op1 = gimple_assign_rhs2 (stmt);
             }
+         else if (call_stmt
+                  && gimple_call_internal_p (call_stmt, IFN_DIV_POW2))
+           {
+             need_same_oprnds = true;
+             first_op1 = gimple_call_arg (call_stmt, 1);
+           }
        }
       else
        {
@@ -1008,15 +1015,20 @@ vect_build_slp_tree_1 (unsigned char *swap,
              continue;
            }
 
-         if (need_same_oprnds
-             && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
+         if (need_same_oprnds)
            {
-             if (dump_enabled_p ())
-               dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                "Build SLP failed: different shift "
-                                "arguments in %G", stmt);
-             /* Mismatch.  */
-             continue;
+             tree other_op1 = (call_stmt
+                               ? gimple_call_arg (call_stmt, 1)
+                               : gimple_assign_rhs2 (stmt));
+             if (!operand_equal_p (first_op1, other_op1, 0))
+               {
+                 if (dump_enabled_p ())
+                   dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+                                    "Build SLP failed: different shift "
+                                    "arguments in %G", stmt);
+                 /* Mismatch.  */
+                 continue;
+               }
            }
 
          if (!load_p && rhs_code == CALL_EXPR)