From 4dcc4502f316a7320fe72b62c60af12c77e1c96c Mon Sep 17 00:00:00 2001
From: Richard Biener <rguenther@suse.de>
Date: Mon, 23 Mar 2020 13:08:41 +0100
Subject: [PATCH] tree-optimization/94261 - avoid IL adjustments in SLP
 analysis

The remaining IL adjustment done by SLP analysis turns out to be
harmful since we now share the IL between multiple SLP analysis
states.  It turns out we do not actually need those adjustments
apart from the case where we reorder scalar stmts during
re-arrangement when optimizing load permutations in SLP reductions.
But that isn't needed either now since we only need to permute the
non-isomorphic parts, which now reside in separate SLP nodes that
are all leaves.

2020-03-23  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/94261
	* tree-vect-slp.c (vect_get_and_check_slp_defs): Remove
	IL operand swapping code.
	(vect_slp_rearrange_stmts): Do not rearrange isomorphic
	nodes that would need operation code adjustments.
---
 gcc/ChangeLog       |  8 ++++++++
 gcc/tree-vect-slp.c | 54 ++++++++---------------------------------------------
 2 files changed, 16 insertions(+), 46 deletions(-)
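As an illustration of the kind of IL adjustment this removes (an
editor's sketch, not the testcase from PR 94261; the function and
variable names below are made up): consider a two-lane group of
COND_EXPRs whose comparisons are mirror images of each other.  To
build an isomorphic SLP node, vect_get_and_check_slp_defs previously
rewrote the second comparison in the IL itself via swap_ssa_operands
and swap_tree_comparison; since the stmts are now shared across
multiple SLP analysis states, the swap is instead recorded only in
the SLP tree.

  /* Hypothetical example, not the PR 94261 testcase.  Lane 1's
     comparison is the mirror image of lane 0's, so SLP matching
     wants to treat both lanes as b[i] < c[i] ? 1 : 2.  */
  void
  foo (int *restrict a, int *restrict b, int *restrict c)
  {
    a[0] = b[0] < c[0] ? 1 : 2;
    a[1] = c[1] > b[1] ? 1 : 2;   /* same as b[1] < c[1] ? 1 : 2 */
  }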
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 234afee..8804b0c 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,11 @@
+2020-03-23  Richard Biener  <rguenther@suse.de>
+
+	PR tree-optimization/94261
+	* tree-vect-slp.c (vect_get_and_check_slp_defs): Remove
+	IL operand swapping code.
+	(vect_slp_rearrange_stmts): Do not rearrange isomorphic
+	nodes that would need operation code adjustments.
+
 2020-03-23  Tobias Burnus  <tobias@codesourcery.com>
 
 	* doc/install.texi (amdgcn-*-amdhsa): Renamed
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index e43d03b..f6331ee 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -562,52 +562,6 @@ again:
   /* Swap operands.  */
   if (swapped)
     {
-      if (first_op_cond)
-	{
-	  /* If there are already uses of this stmt in a SLP instance then
-	     we've committed to the operand order and can't swap it.  */
-	  if (STMT_VINFO_NUM_SLP_USES (stmt_info) != 0)
-	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "Build SLP failed: cannot swap operands of "
-				 "shared stmt %G", stmt_info->stmt);
-	      return -1;
-	    }
-
-	  /* To get rid of this swapping we have to move the stmt code
-	     to the SLP tree as well (and gather it here per stmt).  */
-	  gassign *stmt = as_a <gassign *> (stmt_info->stmt);
-	  tree cond = gimple_assign_rhs1 (stmt);
-	  enum tree_code code = TREE_CODE (cond);
-
-	  /* Swap.  */
-	  if (*swap == 1)
-	    {
-	      swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
-				 &TREE_OPERAND (cond, 1));
-	      TREE_SET_CODE (cond, swap_tree_comparison (code));
-	    }
-	  /* Invert.  */
-	  else
-	    {
-	      swap_ssa_operands (stmt, gimple_assign_rhs2_ptr (stmt),
-				 gimple_assign_rhs3_ptr (stmt));
-	      if (STMT_VINFO_REDUC_IDX (stmt_info) == 1)
-		STMT_VINFO_REDUC_IDX (stmt_info) = 2;
-	      else if (STMT_VINFO_REDUC_IDX (stmt_info) == 2)
-		STMT_VINFO_REDUC_IDX (stmt_info) = 1;
-	      bool honor_nans = HONOR_NANS (TREE_OPERAND (cond, 0));
-	      code = invert_tree_comparison (TREE_CODE (cond), honor_nans);
-	      gcc_assert (code != ERROR_MARK);
-	      TREE_SET_CODE (cond, code);
-	    }
-	}
-      else
-	{
-	  /* Commutative ops need not reflect swapping, ops are in
-	     the SLP tree.  */
-	}
       if (dump_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location,
 			 "swapped operands to match def types in %G",
@@ -1815,6 +1769,14 @@ vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
   if (SLP_TREE_SCALAR_STMTS (node).exists ())
     {
       gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
+      /* ??? Computation nodes are isomorphic and need no rearrangement.
+	 This is a quick hack to cover those where rearrangement breaks
+	 semantics because only the first stmt is guaranteed to have the
+	 correct operation code due to others being swapped or inverted.  */
+      stmt_vec_info first = SLP_TREE_SCALAR_STMTS (node)[0];
+      if (is_gimple_assign (first->stmt)
+	  && gimple_assign_rhs_code (first->stmt) == COND_EXPR)
+	return;
       vec<stmt_vec_info> tmp_stmts;
       tmp_stmts.create (group_size);
       tmp_stmts.quick_grow (group_size);
-- 
2.7.4