From: Richard Biener
Date: Wed, 9 Dec 2020 14:48:36 +0000 (+0100)
Subject: tree-optimization/98213 - cache PHI walking result in SM
X-Git-Tag: upstream/12.2.0~11068
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=84d08255f9f2f7137caf648fcc9dc36101bc893c;p=platform%2Fupstream%2Fgcc.git

tree-optimization/98213 - cache PHI walking result in SM

This avoids exponential work when walking PHIs in loop store motion.
Fails are quickly propagated and thus need no caching.

2020-12-09  Richard Biener

        PR tree-optimization/98213
        * tree-ssa-loop-im.c (sm_seq_valid_bb): Cache successfully
        processed PHIs.
        (hoist_memory_references): Adjust.

        * g++.dg/pr98213.C: New testcase.
---

diff --git a/gcc/testsuite/g++.dg/pr98213.C b/gcc/testsuite/g++.dg/pr98213.C
new file mode 100644
index 0000000..1a744eb
--- /dev/null
+++ b/gcc/testsuite/g++.dg/pr98213.C
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+
+#include <algorithm>
+
+long var_23;
+int var_24, test_var_8;
+extern bool arr_20[][13];
+char arr_21_0_0_0_0_0;
+int *test_arr_0;
+void test(unsigned long long var_1)
+{
+  int arr_16;
+  for (int i_0 = 0;;)
+    for (int i_5; i_5;) {
+      for (int i_6 = 0; i_6 < 19; i_6 += 4)
+        for (long i_7(test_var_8); i_7; i_7 += 2) {
+          arr_20[0][i_7] = arr_21_0_0_0_0_0 = 0;
+          var_23 = test_arr_0[0];
+        }
+      var_24 = std::max((unsigned long long)arr_16,
+                        std::min((unsigned long long)5, var_1));
+    }
+}
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 92e5a8d..fe48d02 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -2254,7 +2254,8 @@ sm_seq_push_down (vec<seq_entry> &seq, unsigned ptr, unsigned *at)
 static int
 sm_seq_valid_bb (class loop *loop, basic_block bb, tree vdef,
                  vec<seq_entry> &seq, bitmap refs_not_in_seq,
-                 bitmap refs_not_supported, bool forked)
+                 bitmap refs_not_supported, bool forked,
+                 bitmap fully_visited)
 {
   if (!vdef)
     for (gimple_stmt_iterator gsi = gsi_last_bb (bb); !gsi_end_p (gsi);
@@ -2276,7 +2277,7 @@ sm_seq_valid_bb (class loop *loop, basic_block bb, tree vdef,
         /* This handles the perfect nest case.  */
         return sm_seq_valid_bb (loop, single_pred (bb), vdef, seq,
                                 refs_not_in_seq, refs_not_supported,
-                                forked);
+                                forked, fully_visited);
       return 0;
     }
   do
@@ -2314,7 +2315,10 @@ sm_seq_valid_bb (class loop *loop, basic_block bb, tree vdef,
         return sm_seq_valid_bb (loop, gimple_phi_arg_edge (phi, 0)->src,
                                 gimple_phi_arg_def (phi, 0), seq,
                                 refs_not_in_seq, refs_not_supported,
-                                false);
+                                false, fully_visited);
+      if (bitmap_bit_p (fully_visited,
+                        SSA_NAME_VERSION (gimple_phi_result (phi))))
+        return 1;
       auto_vec<seq_entry> first_edge_seq;
       auto_bitmap tem_refs_not_in_seq (&lim_bitmap_obstack);
       int eret;
@@ -2323,7 +2327,7 @@ sm_seq_valid_bb (class loop *loop, basic_block bb, tree vdef,
       eret = sm_seq_valid_bb (loop, gimple_phi_arg_edge (phi, 0)->src,
                               gimple_phi_arg_def (phi, 0), first_edge_seq,
                               tem_refs_not_in_seq, refs_not_supported,
-                              true);
+                              true, fully_visited);
       if (eret != 1)
         return -1;
       /* Simplify our lives by pruning the sequence of !sm_ord.  */
@@ -2338,7 +2342,7 @@ sm_seq_valid_bb (class loop *loop, basic_block bb, tree vdef,
           bitmap_copy (tem_refs_not_in_seq, refs_not_in_seq);
           eret = sm_seq_valid_bb (loop, e->src, vuse, edge_seq,
                                   tem_refs_not_in_seq, refs_not_supported,
-                                  true);
+                                  true, fully_visited);
           if (eret != 1)
             return -1;
           /* Simplify our lives by pruning the sequence of !sm_ord.  */
@@ -2419,6 +2423,8 @@ sm_seq_valid_bb (class loop *loop, basic_block bb, tree vdef,
               seq[new_idx].from = NULL_TREE;
             }
         }
+      bitmap_set_bit (fully_visited,
+                      SSA_NAME_VERSION (gimple_phi_result (phi)));
       return 1;
     }
   lim_aux_data *data = get_lim_data (def);
@@ -2494,9 +2500,11 @@ hoist_memory_references (class loop *loop, bitmap mem_refs,
       seq.create (4);
       auto_bitmap refs_not_in_seq (&lim_bitmap_obstack);
       bitmap_copy (refs_not_in_seq, mem_refs);
+      auto_bitmap fully_visited;
       int res = sm_seq_valid_bb (loop, e->src, NULL_TREE,
                                  seq, refs_not_in_seq,
-                                 refs_not_supported, false);
+                                 refs_not_supported, false,
+                                 fully_visited);
       if (res != 1)
         {
           bitmap_copy (refs_not_supported, mem_refs);
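
The idea behind the patch is easiest to see outside of GCC. Below is a minimal,
self-contained sketch of the same memoization pattern; it is not part of the
patch, and Node, preds and walk_node are hypothetical stand-ins for the
virtual-operand PHI web that sm_seq_valid_bb walks, with fully_visited playing
the role of the new bitmap. Failures return immediately and are therefore not
cached; only nodes whose walk succeeded are recorded, which keeps chained CFG
diamonds from causing exponential re-walks.

/* Illustrative sketch only -- not GCC code.  All names here are made up.  */
#include <unordered_set>
#include <vector>
#include <cstdio>

struct Node
{
  int id;
  bool ok;                      /* whether processing this node succeeds */
  std::vector<Node *> preds;    /* predecessors reached by the walk */
};

/* Return 1 on success, -1 on failure, mirroring sm_seq_valid_bb's convention.
   Without the cache a node is re-walked once per path that reaches it.  */
static int
walk_node (Node *n, std::unordered_set<int> &fully_visited)
{
  if (!n->ok)
    return -1;                  /* fails propagate right away, no caching */
  if (fully_visited.count (n->id))
    return 1;                   /* already successfully processed */
  for (Node *pred : n->preds)
    if (walk_node (pred, fully_visited) != 1)
      return -1;
  fully_visited.insert (n->id); /* cache only the successful result */
  return 1;
}

int
main ()
{
  /* A diamond: without the cache 'top' would be walked twice, and chaining
     such diamonds doubles the work at every level.  */
  Node top = { 0, true, {} };
  Node a = { 1, true, { &top } };
  Node b = { 2, true, { &top } };
  Node join = { 3, true, { &a, &b } };
  std::unordered_set<int> fully_visited;
  printf ("walk result: %d\n", walk_node (&join, fully_visited));
}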