rs6000: Fix invalid splits when using Altivec style addresses [PR98959]
author Peter Bergner <bergner@linux.ibm.com>
Mon, 8 Mar 2021 18:20:41 +0000 (12:20 -0600)
committer Peter Bergner <bergner@linux.ibm.com>
Mon, 8 Mar 2021 18:21:39 +0000 (12:21 -0600)
The rs6000_emit_le_vsx_* functions assume they are not passed an Altivec
style "& ~16" address.  However, some of our expanders and splitters do
not verify we do not have an Altivec style address before calling those
functions, leading to an ICE.  The solution here is to guard the expanders
and splitters to ensure we do not call them if we're given an Altivec style
address.
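
For background, an Altivec style address is one the hardware masks with -16
(lvx/stvx ignore the low four bits of the effective address), which in RTL is
roughly (mem (and (address) (const_int -16))).  The C snippet below is only an
illustrative sketch, not GCC code: it models the shape of the new guards with a
hypothetical altivec_style_address_p helper standing in for
altivec_indexed_or_indirect_operand.

/* Illustrative sketch only (hypothetical types and helper, not GCC code).
   An Altivec style access applies "& -16" to the effective address; the new
   insn conditions refuse such addresses so the little-endian permute/split
   code never sees them.  */
#include <stdbool.h>
#include <stdio.h>

struct addr
{
  unsigned long ea;   /* effective address computed from base + index */
  bool masked_m16;    /* true if the access applies "& -16" (Altivec form) */
};

/* Stand-in for the real predicate: true when the access uses the
   Altivec "& -16" form and must not reach the LE swap code.  */
static bool
altivec_style_address_p (const struct addr *a)
{
  return a->masked_m16;
}

int
main (void)
{
  struct addr plain  = { 0x1008, false };
  struct addr masked = { 0x1008, true };

  /* Mirrors the added "&& !altivec_indexed_or_indirect_operand (...)"
     guards: only the plain form may take the LE swap path.  */
  printf ("plain form takes LE swap path:  %d\n", !altivec_style_address_p (&plain));
  printf ("masked form takes LE swap path: %d\n", !altivec_style_address_p (&masked));
  return 0;
}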

2021-03-08  Peter Bergner  <bergner@linux.ibm.com>

gcc/
PR target/98959
* config/rs6000/rs6000.c (rs6000_emit_le_vsx_permute): Add an assert
to ensure we do not have an Altivec style address.
* config/rs6000/vsx.md (*vsx_le_perm_load_<mode>): Disable if passed
an Altivec style address.
(*vsx_le_perm_store_<mode>): Likewise.
(splitters after *vsx_le_perm_store_<mode>): Likewise.
(vsx_load_<mode>): Disable special expander if passed an Altivec
style address.
(vsx_store_<mode>): Likewise.

gcc/testsuite/
PR target/98959
* gcc.target/powerpc/pr98959.c: New test.

gcc/config/rs6000/rs6000.c
gcc/config/rs6000/vsx.md
gcc/testsuite/gcc.target/powerpc/pr98959.c [new file with mode: 0644]

diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index cf8e5d8..c89fb6e 100644 (file)
@@ -10111,6 +10111,9 @@ rs6000_const_vec (machine_mode mode)
 void
 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
 {
+  gcc_assert (!altivec_indexed_or_indirect_operand (dest, mode));
+  gcc_assert (!altivec_indexed_or_indirect_operand (source, mode));
+
   /* Scalar permutations are easier to express in integer modes rather than
      floating-point modes, so cast them here.  We use V1TImode instead
      of TImode to ensure that the values don't go through GPRs.  */
diff --git a/gcc/config/rs6000/vsx.md b/gcc/config/rs6000/vsx.md
index ad67396..a1fa4f9 100644 (file)
 (define_insn_and_split "*vsx_le_perm_load_<mode>"
   [(set (match_operand:VSX_LE_128 0 "vsx_register_operand" "=wa,r")
         (match_operand:VSX_LE_128 1 "memory_operand" "Z,Q"))]
-  "!BYTES_BIG_ENDIAN && TARGET_VSX && !TARGET_P9_VECTOR"
+  "!BYTES_BIG_ENDIAN && TARGET_VSX && !TARGET_P9_VECTOR
+   && !altivec_indexed_or_indirect_operand (operands[1], <MODE>mode)"
   "@
    #
    #"
-  "!BYTES_BIG_ENDIAN && TARGET_VSX && !TARGET_P9_VECTOR"
+  "!BYTES_BIG_ENDIAN && TARGET_VSX && !TARGET_P9_VECTOR
+   && !altivec_indexed_or_indirect_operand (operands[1], <MODE>mode)"
   [(const_int 0)]
 {
   rtx tmp = (can_create_pseudo_p ()
 (define_insn "*vsx_le_perm_store_<mode>"
   [(set (match_operand:VSX_LE_128 0 "memory_operand" "=Z,Q")
         (match_operand:VSX_LE_128 1 "vsx_register_operand" "+wa,r"))]
-  "!BYTES_BIG_ENDIAN && TARGET_VSX && !TARGET_P9_VECTOR"
+  "!BYTES_BIG_ENDIAN && TARGET_VSX && !TARGET_P9_VECTOR
+   && !altivec_indexed_or_indirect_operand (operands[0], <MODE>mode)"
   "@
    #
    #"
 (define_split
   [(set (match_operand:VSX_LE_128 0 "memory_operand")
         (match_operand:VSX_LE_128 1 "vsx_register_operand"))]
-  "!BYTES_BIG_ENDIAN && TARGET_VSX && !reload_completed && !TARGET_P9_VECTOR"
+  "!BYTES_BIG_ENDIAN && TARGET_VSX && !reload_completed && !TARGET_P9_VECTOR
+   && !altivec_indexed_or_indirect_operand (operands[0], <MODE>mode)"
   [(const_int 0)]
 {
   rtx tmp = (can_create_pseudo_p ()
 (define_split
   [(set (match_operand:VSX_LE_128 0 "memory_operand")
         (match_operand:VSX_LE_128 1 "vsx_register_operand"))]
-  "!BYTES_BIG_ENDIAN && TARGET_VSX && reload_completed && !TARGET_P9_VECTOR"
+  "!BYTES_BIG_ENDIAN && TARGET_VSX && reload_completed && !TARGET_P9_VECTOR
+   && !altivec_indexed_or_indirect_operand (operands[0], <MODE>mode)"
   [(const_int 0)]
 {
   rs6000_emit_le_vsx_permute (operands[1], operands[1], <MODE>mode);
   "VECTOR_MEM_VSX_P (<MODE>mode)"
 {
   /* Expand to swaps if needed, prior to swap optimization.  */
-  if (!BYTES_BIG_ENDIAN && !TARGET_P9_VECTOR)
+  if (!BYTES_BIG_ENDIAN && !TARGET_P9_VECTOR
+      && !altivec_indexed_or_indirect_operand (operands[1], <MODE>mode))
     {
       rs6000_emit_le_vsx_move (operands[0], operands[1], <MODE>mode);
       DONE;
   "VECTOR_MEM_VSX_P (<MODE>mode)"
 {
   /* Expand to swaps if needed, prior to swap optimization.  */
-  if (!BYTES_BIG_ENDIAN && !TARGET_P9_VECTOR)
+  if (!BYTES_BIG_ENDIAN && !TARGET_P9_VECTOR
+      && !altivec_indexed_or_indirect_operand (operands[0], <MODE>mode))
     {
       rs6000_emit_le_vsx_move (operands[0], operands[1], <MODE>mode);
       DONE;
diff --git a/gcc/testsuite/gcc.target/powerpc/pr98959.c b/gcc/testsuite/gcc.target/powerpc/pr98959.c
new file mode 100644 (file)
index 0000000..9e8523d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr98959.c
@@ -0,0 +1,17 @@
+/* PR target/98959 */
+/* { dg-options "-fno-schedule-insns -O2 -mcmodel=small" } */
+
+/* Verify we do not ICE on the following.  */
+
+typedef __attribute__ ((altivec (vector__))) unsigned __int128 v1ti_t;
+
+v1ti_t foo (v1ti_t v);
+
+void
+bug ()
+{
+  v1ti_t dv = { ((31415926539) << 6) };
+  dv = foo (dv);
+  if (dv[0] != 0)
+    __builtin_abort ();
+}