+2015-10-28 Yvan Roux <yvan.roux@linaro.org>
+ Sebastian Pop <s.pop@samsung.com>
+
+ Backport from trunk r221007, r221675, r222011.
+ 2015-04-11 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/65735
+ * tree-ssa-threadedge.c (fsm_find_control_statement_thread_paths):
+ Remove visited_phis argument, add visited_bbs, avoid recursing into the
+ same bb rather than just into the same phi node.
+ (thread_through_normal_block): Adjust caller.
+
+ 2015-03-25 Sebastian Pop <s.pop@samsung.com>
+
+ PR tree-optimization/65177
+ * tree-ssa-threadupdate.c (verify_seme): Renamed verify_jump_thread.
+ (bb_in_bbs): New.
+ (duplicate_seme_region): Renamed duplicate_thread_path. Redirect all
+ edges not adjacent on the path to the original code.
+
+ 2015-02-26 Sebastian Pop <s.pop@samsung.com>
+
+ PR tree-optimization/65048
+ * tree-ssa-threadupdate.c (valid_jump_thread_path): New.
+ (thread_through_all_blocks): Call valid_jump_thread_path.
+ Remove invalid FSM jump-thread paths.
+
2015-08-05 Christophe Lyon <christophe.lyon@linaro.org>

 * LINARO-VERSION: Bump version.
static void
fsm_find_control_statement_thread_paths (tree expr,
- pointer_set_t *visited_phis,
+ pointer_set_t *visited_bbs,
vec<basic_block, va_gc> *&path,
bool seen_loop_phi)
{
return;
/* Avoid infinite recursion. */
- if (pointer_set_insert (visited_phis, def_stmt))
+ if (pointer_set_insert (visited_bbs, var_bb))
return;
int next_path_length = 0;
{
vec_safe_push (path, bbi);
/* Recursively follow SSA_NAMEs looking for a constant definition. */
- fsm_find_control_statement_thread_paths (arg, visited_phis, path,
+ fsm_find_control_statement_thread_paths (arg, visited_bbs, path,
seen_loop_phi);
path->pop ();
vec<basic_block, va_gc> *bb_path;
vec_alloc (bb_path, n_basic_blocks_for_fn (cfun));
vec_safe_push (bb_path, e->dest);
- pointer_set_t *visited_phis = pointer_set_create ();
+ pointer_set_t *visited_bbs = pointer_set_create ();
max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
- fsm_find_control_statement_thread_paths (cond, visited_phis, bb_path,
+ fsm_find_control_statement_thread_paths (cond, visited_bbs, bb_path,
false);
- pointer_set_destroy (visited_phis);
+ pointer_set_destroy (visited_bbs);
vec_free (bb_path);
}
return 0;
return false;
}
-/* Verify that the REGION is a Single Entry Multiple Exits region: make sure no
- edge other than ENTRY is entering the REGION. */
+/* Verify that the REGION is a valid jump thread. A jump thread is a special
+ case of a Single Entry Multiple Exits (SEME) region in which all nodes in
+ the REGION have exactly one incoming edge. The only exception is the first
+ block, which may not have been connected to the rest of the CFG yet. */
DEBUG_FUNCTION void
-verify_seme (edge entry, basic_block *region, unsigned n_region)
+verify_jump_thread (basic_block *region, unsigned n_region)
{
- bitmap bbs = BITMAP_ALLOC (NULL);
-
for (unsigned i = 0; i < n_region; i++)
- bitmap_set_bit (bbs, region[i]->index);
+ gcc_assert (EDGE_COUNT (region[i]->preds) <= 1);
+}
- for (unsigned i = 0; i < n_region; i++)
- {
- edge e;
- edge_iterator ei;
- basic_block bb = region[i];
+/* Return true when BB is one of the first N items in BBS. */
- /* All predecessors other than ENTRY->src should be in the region. */
- for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); ei_next (&ei))
- if (e != entry)
- gcc_assert (bitmap_bit_p (bbs, e->src->index));
- }
+static inline bool
+bb_in_bbs (basic_block bb, basic_block *bbs, int n)
+{
+ for (int i = 0; i < n; i++)
+ if (bb == bbs[i])
+ return true;
- BITMAP_FREE (bbs);
+ return false;
}
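For illustration only, not part of the patch: a standalone toy model of the two helpers above, using a plain struct in place of GCC's basic_block. The toy_* names are invented for this sketch; it only shows the single-incoming-edge invariant that verify_jump_thread asserts and the linear membership test that bb_in_bbs performs.

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for a basic block: only what the two checks need.  */
struct toy_bb { int index; int n_preds; };

/* Linear membership test over the first N path entries (cf. bb_in_bbs).  */
static bool
toy_bb_in_bbs (struct toy_bb *bb, struct toy_bb **bbs, int n)
{
  for (int i = 0; i < n; i++)
    if (bb == bbs[i])
      return true;
  return false;
}

/* Every block on a jump-thread path has at most one incoming edge
   (cf. the gcc_assert in verify_jump_thread).  */
static bool
toy_verify_jump_thread (struct toy_bb **path, int n)
{
  for (int i = 0; i < n; i++)
    if (path[i]->n_preds > 1)
      return false;
  return true;
}

int
main (void)
{
  struct toy_bb a = { 2, 1 }, b = { 3, 1 }, c = { 4, 2 };
  struct toy_bb *good[] = { &a, &b };
  struct toy_bb *bad[] = { &a, &c };   /* c has two predecessors */
  printf ("good=%d bad=%d member=%d\n",
          toy_verify_jump_thread (good, 2),
          toy_verify_jump_thread (bad, 2),
          toy_bb_in_bbs (&b, good, 2));   /* prints: good=1 bad=0 member=1 */
  return 0;
}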
-/* Duplicates a Single Entry Multiple Exit REGION (set of N_REGION basic
- blocks). The ENTRY edge is redirected to the duplicate of the region. If
- REGION is not a Single Entry region, ignore any incoming edges other than
- ENTRY: this makes the copied region a Single Entry region.
+/* Duplicates a jump-thread path of N_REGION basic blocks.
+ The ENTRY edge is redirected to the duplicate of the region.
Remove the last conditional statement in the last basic block in the REGION,
and create a single fallthru edge pointing to the same destination as the EXIT edge.
Returns false if it is unable to copy the region, true otherwise. */
static bool
-duplicate_seme_region (edge entry, edge exit,
+duplicate_thread_path (edge entry, edge exit,
basic_block *region, unsigned n_region,
basic_block *region_copy)
{
}
copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
- split_edge_bb_loc (entry), 0);
+ split_edge_bb_loc (entry), false);
+
+ /* Fix up: copy_bbs redirects all edges pointing to copied blocks. The
+ following code ensures that all edges exiting the jump-thread path are
+ redirected back to the original code: these edges would otherwise
+ invalidate the property established by executing all the blocks of the
+ jump-thread path in order. */
+
+ for (i = 0; i < n_region; i++)
+ {
+ edge e;
+ edge_iterator ei;
+ basic_block bb = region_copy[i];
+
+ if (single_succ_p (bb))
+ {
+ /* Make sure the successor is the next node in the path. */
+ gcc_assert (i + 1 == n_region
+ || region_copy[i + 1] == single_succ_edge (bb)->dest);
+ continue;
+ }
+
+ /* Special case the last block on the path: make sure that it does not
+ jump back on the copied path. */
+ if (i + 1 == n_region)
+ {
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (bb_in_bbs (e->dest, region_copy, n_region - 1))
+ {
+ basic_block orig = get_bb_original (e->dest);
+ if (orig)
+ redirect_edge_and_branch_force (e, orig);
+ }
+ continue;
+ }
+
+ /* Redirect all other edges jumping to non-adjacent blocks back to the
+ original code. */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (region_copy[i + 1] != e->dest)
+ {
+ basic_block orig = get_bb_original (e->dest);
+ if (orig)
+ redirect_edge_and_branch_force (e, orig);
+ }
+ }
+
if (total_count)
{
scale_bbs_frequencies_gcov_type (region, n_region,
}
#ifdef ENABLE_CHECKING
- /* Make sure no edge other than ENTRY is entering the copied region. */
- verify_seme (entry, region_copy, n_region);
+ verify_jump_thread (region_copy, n_region);
#endif
/* Remove the last branch in the jump thread path. */
return true;
}
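For illustration only, not part of the patch: a rough standalone sketch of the fix-up idea in duplicate_thread_path above. Plain structs replace GCC's CFG, a stored original pointer stands in for get_bb_original, and the toy_* names are invented; the real code works on edges via redirect_edge_and_branch_force and also special-cases the last block of the path, which this sketch omits.

#include <assert.h>
#include <stddef.h>

#define TOY_MAX_SUCCS 2

/* Minimal stand-in for a basic block: its successors plus a link from a
   copied block back to the block it was copied from (cf. get_bb_original).  */
struct toy_bb
{
  struct toy_bb *succs[TOY_MAX_SUCCS];
  struct toy_bb *original;   /* non-NULL on copies only */
};

/* Keep only the successor edges that follow the copied path; any other
   edge leaving a copied block is sent back to the corresponding original
   block, so that leaving the path also leaves the specialized copy.  */
static void
toy_fixup_copied_path (struct toy_bb **copy, int n)
{
  for (int i = 0; i + 1 < n; i++)
    for (int s = 0; s < TOY_MAX_SUCCS; s++)
      {
        struct toy_bb *dest = copy[i]->succs[s];
        if (dest && dest != copy[i + 1] && dest->original)
          copy[i]->succs[s] = dest->original;
      }
}

int
main (void)
{
  struct toy_bb c_orig = { { NULL, NULL }, NULL };     /* original block C */
  struct toy_bb c_copy = { { NULL, NULL }, &c_orig };  /* copy of C, off the path */
  struct toy_bb b_copy = { { NULL, NULL }, NULL };
  struct toy_bb a_copy = { { &b_copy, &c_copy }, NULL };
  struct toy_bb *path[] = { &a_copy, &b_copy };

  toy_fixup_copied_path (path, 2);
  /* The stray edge into the copy of C now targets the original C.  */
  assert (a_copy.succs[1] == &c_orig);
  return 0;
}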
+/* Return true when PATH is a valid jump-thread path. */
+
+static bool
+valid_jump_thread_path (vec<jump_thread_edge *> *path)
+{
+ unsigned len = path->length ();
+
+ /* Check that the path is connected. */
+ for (unsigned int j = 0; j < len - 1; j++)
+ if ((*path)[j]->e->dest != (*path)[j+1]->e->src)
+ return false;
+
+ return true;
+}
+
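For illustration only, not part of the patch: a standalone sketch of the connectivity test that valid_jump_thread_path performs, with a plain struct in place of GCC's edge and invented toy_* names. The point is simply that each recorded edge must end where the next one starts, since an earlier jump thread may have rewired the CFG underneath a later path.

#include <assert.h>
#include <stdbool.h>

/* Minimal stand-in for an edge: source and destination block indices.  */
struct toy_edge { int src; int dest; };

/* A recorded path is still usable only if it is connected: each edge must
   end where the following edge starts (cf. valid_jump_thread_path).  */
static bool
toy_valid_path (const struct toy_edge *path, unsigned len)
{
  for (unsigned j = 0; j + 1 < len; j++)
    if (path[j].dest != path[j + 1].src)
      return false;
  return true;
}

int
main (void)
{
  struct toy_edge connected[] = { { 1, 2 }, { 2, 3 }, { 3, 5 } };
  struct toy_edge broken[]    = { { 1, 2 }, { 4, 5 } };   /* 2 != 4 */
  assert (toy_valid_path (connected, 3));
  assert (!toy_valid_path (broken, 2));
  return 0;
}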
/* Walk through all blocks and thread incoming edges to the appropriate
outgoing edge for each edge pair recorded in THREADED_EDGES.
vec<jump_thread_edge *> *path = paths[i];
edge entry = (*path)[0]->e;
- if ((*path)[0]->type != EDGE_FSM_THREAD
- /* Do not jump-thread twice from the same block. */
- || bitmap_bit_p (threaded_blocks, entry->src->index)) {
- i++;
- continue;
- }
+ /* Only code-generate FSM jump-threads in this loop. */
+ if ((*path)[0]->type != EDGE_FSM_THREAD)
+ {
+ i++;
+ continue;
+ }
+
+ /* Do not jump-thread twice from the same block. */
+ if (bitmap_bit_p (threaded_blocks, entry->src->index)
+ /* Verify that the jump thread path is still valid: a
+ previous jump-thread may have changed the CFG, and
+ invalidated the current path. */
+ || !valid_jump_thread_path (path))
+ {
+ /* Remove invalid FSM jump-thread paths. */
+ delete_jump_thread_path (path);
+ paths.unordered_remove (i);
+ continue;
+ }
unsigned len = path->length ();
edge exit = (*path)[len - 1]->e;
for (unsigned int j = 0; j < len - 1; j++)
region[j] = (*path)[j]->e->dest;
- if (duplicate_seme_region (entry, exit, region, len - 1, NULL))
+ if (duplicate_thread_path (entry, exit, region, len - 1, NULL))
{
/* We do not update dominance info. */
free_dominance_info (CDI_DOMINATORS);
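For illustration only, not part of the patch: the removal loop above advances the index only when a path is kept; on removal, paths.unordered_remove (i) moves the last path into slot i, so the same slot must be examined again. A rough standalone model of that pattern over a plain int array, with invented toy_* names:

#include <assert.h>
#include <stdbool.h>

/* Filter VALS in place, keeping only elements for which KEEP returns true.
   Removal swaps the last element into slot I (cf. vec::unordered_remove),
   so I is not advanced on that branch.  Returns the new length.  */
static unsigned
toy_filter (int *vals, unsigned n, bool (*keep) (int))
{
  unsigned i = 0;
  while (i < n)
    {
      if (!keep (vals[i]))
        {
          vals[i] = vals[n - 1];   /* unordered_remove: move last into I */
          n--;
          continue;                /* re-examine the element now at I */
        }
      i++;
    }
  return n;
}

static bool
is_even (int v)
{
  return (v & 1) == 0;
}

int
main (void)
{
  int vals[] = { 1, 2, 3, 4, 5 };
  unsigned n = toy_filter (vals, 5, is_even);
  assert (n == 2);   /* only the even values survive, order not preserved */
  return 0;
}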