static struct cfg_stats_d cfg_stats;
-/* Nonzero if we found a computed goto while building basic blocks. */
-static bool found_computed_goto;
-
/* Hash table to store last discriminator assigned for each locus. */
struct locus_discrim_map
{
/* Basic blocks and flowgraphs. */
static void make_blocks (gimple_seq);
-static void factor_computed_gotos (void);
/* Edges. */
static void make_edges (void);
static void assign_discriminators (void);
static void make_cond_expr_edges (basic_block);
static void make_gimple_switch_edges (basic_block);
-static void make_goto_expr_edges (basic_block);
+static bool make_goto_expr_edges (basic_block);
static void make_gimple_asm_edges (basic_block);
static edge gimple_redirect_edge_and_branch (edge, basic_block);
static edge gimple_try_redirect_by_replacing_jump (edge, basic_block);
-static unsigned int split_critical_edges (void);
/* Various helpers. */
static inline bool stmt_starts_bb_p (gimple, gimple);
init_empty_tree_cfg ();
- found_computed_goto = 0;
make_blocks (seq);
- /* Computed gotos are hell to deal with, especially if there are
- lots of them with a large number of destinations. So we factor
- them to a common computed goto location before we build the
- edge list. After we convert back to normal form, we will un-factor
- the computed gotos since factoring introduces an unwanted jump. */
- if (found_computed_goto)
- factor_computed_gotos ();
-
/* Make sure there is always at least one block, even if it's empty. */
if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
}
-/* Search for ANNOTATE call with annot_expr_ivdep_kind; if found, remove
- it and set loop->safelen to INT_MAX. We assume that the annotation
- comes immediately before the condition. */
+/* Look for ANNOTATE calls with a loop annotation kind; if found, remove
+ them and propagate the information to the loop. We assume that the
+ annotations come immediately before the condition of the loop. */
static void
replace_loop_annotate ()
{
gsi = gsi_last_bb (loop->header);
stmt = gsi_stmt (gsi);
- if (stmt && gimple_code (stmt) == GIMPLE_COND)
+ if (!(stmt && gimple_code (stmt) == GIMPLE_COND))
+ continue;
+ for (gsi_prev_nondebug (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
{
- gsi_prev_nondebug (&gsi);
- if (gsi_end_p (gsi))
- continue;
stmt = gsi_stmt (gsi);
if (gimple_code (stmt) != GIMPLE_CALL)
- continue;
+ break;
if (!gimple_call_internal_p (stmt)
- || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
- continue;
- if ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1))
- != annot_expr_ivdep_kind)
- continue;
+ || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
+ break;
+ switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
+ {
+ case annot_expr_ivdep_kind:
+ loop->safelen = INT_MAX;
+ break;
+ case annot_expr_no_vector_kind:
+ loop->dont_vectorize = true;
+ break;
+ case annot_expr_vector_kind:
+ loop->force_vectorize = true;
+ cfun->has_force_vectorize_loops = true;
+ break;
+ default:
+ gcc_unreachable ();
+ }
stmt = gimple_build_assign (gimple_call_lhs (stmt),
gimple_call_arg (stmt, 0));
gsi_replace (&gsi, stmt, true);
- loop->safelen = INT_MAX;
}
}
- /* Remove IFN_ANNOTATE. Safeguard for the case loop->latch == NULL. */
+ /* Remove IFN_ANNOTATE. Safeguard for the case loop->latch == NULL. */
FOR_EACH_BB_FN (bb, cfun)
{
- gsi = gsi_last_bb (bb);
- stmt = gsi_stmt (gsi);
- if (stmt && gimple_code (stmt) == GIMPLE_COND)
- gsi_prev_nondebug (&gsi);
- if (gsi_end_p (gsi))
- continue;
- stmt = gsi_stmt (gsi);
- if (gimple_code (stmt) != GIMPLE_CALL)
- continue;
- if (!gimple_call_internal_p (stmt)
- || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
- continue;
- if ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1))
- != annot_expr_ivdep_kind)
- continue;
- warning_at (gimple_location (stmt), 0, "ignoring %<GCC ivdep%> "
- "annotation");
- stmt = gimple_build_assign (gimple_call_lhs (stmt),
- gimple_call_arg (stmt, 0));
- gsi_replace (&gsi, stmt, true);
+ for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
+ {
+ stmt = gsi_stmt (gsi);
+ if (gimple_code (stmt) != GIMPLE_CALL)
+ break;
+ if (!gimple_call_internal_p (stmt)
+ || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
+ break;
+ switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
+ {
+ case annot_expr_ivdep_kind:
+ case annot_expr_no_vector_kind:
+ case annot_expr_vector_kind:
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ warning_at (gimple_location (stmt), 0, "ignoring loop annotation");
+ stmt = gimple_build_assign (gimple_call_lhs (stmt),
+ gimple_call_arg (stmt, 0));
+ gsi_replace (&gsi, stmt, true);
+ }
}
}
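
For reference, a minimal sketch of the kind of source that feeds
replace_loop_annotate: #pragma GCC ivdep lowers to an IFN_ANNOTATE
call of kind annot_expr_ivdep_kind immediately before the loop
condition.  (The front-end constructs behind the vectorize/no-vector
kinds are not shown in this patch, so only the ivdep mapping is
illustrated here.)

    /* Hypothetical input; the pragma becomes an internal
       IFN_ANNOTATE call on the loop's controlling condition,
       which the pass above turns into loop->safelen = INT_MAX.  */
    void
    scale (int *restrict a, int *restrict b, int n)
    {
    #pragma GCC ivdep
      for (int i = 0; i < n; i++)
        a[i] = b[i] * 2;
    }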
GIMPLE_PASS, /* type */
"cfg", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- false, /* has_gate */
true, /* has_execute */
TV_TREE_CFG, /* tv_id */
PROP_gimple_leh, /* properties_required */
{}
/* opt_pass methods: */
- unsigned int execute () { return execute_build_cfg (); }
+ virtual unsigned int execute (function *) { return execute_build_cfg (); }
}; // class pass_build_cfg
/* Return true if T is a computed goto. */
-static bool
+bool
computed_goto_p (gimple t)
{
return (gimple_code (t) == GIMPLE_GOTO
if (gsi_end_p (gsi))
return false;
stmt = gsi_stmt (gsi);
- if (is_gimple_debug (stmt))
+ while (is_gimple_debug (stmt) || gimple_clobber_p (stmt))
{
- gsi_next_nondebug (&gsi);
+ gsi_next (&gsi);
if (gsi_end_p (gsi))
return false;
stmt = gsi_stmt (gsi);
}
-/* Search the CFG for any computed gotos. If found, factor them to a
- common computed goto site. Also record the location of that site so
- that we can un-factor the gotos after we have converted back to
- normal form. */
-
-static void
-factor_computed_gotos (void)
-{
- basic_block bb;
- tree factored_label_decl = NULL;
- tree var = NULL;
- gimple factored_computed_goto_label = NULL;
- gimple factored_computed_goto = NULL;
-
- /* We know there are one or more computed gotos in this function.
- Examine the last statement in each basic block to see if the block
- ends with a computed goto. */
-
- FOR_EACH_BB_FN (bb, cfun)
- {
- gimple_stmt_iterator gsi = gsi_last_bb (bb);
- gimple last;
-
- if (gsi_end_p (gsi))
- continue;
-
- last = gsi_stmt (gsi);
-
- /* Ignore the computed goto we create when we factor the original
- computed gotos. */
- if (last == factored_computed_goto)
- continue;
-
- /* If the last statement is a computed goto, factor it. */
- if (computed_goto_p (last))
- {
- gimple assignment;
-
- /* The first time we find a computed goto we need to create
- the factored goto block and the variable each original
- computed goto will use for their goto destination. */
- if (!factored_computed_goto)
- {
- basic_block new_bb = create_empty_bb (bb);
- gimple_stmt_iterator new_gsi = gsi_start_bb (new_bb);
-
- /* Create the destination of the factored goto. Each original
- computed goto will put its desired destination into this
- variable and jump to the label we create immediately
- below. */
- var = create_tmp_var (ptr_type_node, "gotovar");
-
- /* Build a label for the new block which will contain the
- factored computed goto. */
- factored_label_decl = create_artificial_label (UNKNOWN_LOCATION);
- factored_computed_goto_label
- = gimple_build_label (factored_label_decl);
- gsi_insert_after (&new_gsi, factored_computed_goto_label,
- GSI_NEW_STMT);
-
- /* Build our new computed goto. */
- factored_computed_goto = gimple_build_goto (var);
- gsi_insert_after (&new_gsi, factored_computed_goto, GSI_NEW_STMT);
- }
-
- /* Copy the original computed goto's destination into VAR. */
- assignment = gimple_build_assign (var, gimple_goto_dest (last));
- gsi_insert_before (&gsi, assignment, GSI_SAME_STMT);
-
- /* And re-vector the computed goto to the new destination. */
- gimple_goto_set_dest (last, factored_label_decl);
- }
- }
-}
-
-
/* Build a flowgraph for the sequence of stmts SEQ. */
static void
codes. */
gimple_set_bb (stmt, bb);
- if (computed_goto_p (stmt))
- found_computed_goto = true;
-
/* If STMT is a basic block terminator, set START_NEW_BLOCK for the
next iteration. */
if (stmt_ends_bb_p (stmt))
}
}
+/* If basic block BB has an abnormal edge to a basic block
+   containing an IFN_ABNORMAL_DISPATCHER internal call, return
+   the dispatcher's basic block, otherwise return NULL.  */
+
+basic_block
+get_abnormal_succ_dispatcher (basic_block bb)
+{
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) == EDGE_ABNORMAL)
+ {
+ gimple_stmt_iterator gsi
+ = gsi_start_nondebug_after_labels_bb (e->dest);
+ gimple g = gsi_stmt (gsi);
+ if (g
+ && is_gimple_call (g)
+ && gimple_call_internal_p (g)
+ && gimple_call_internal_fn (g) == IFN_ABNORMAL_DISPATCHER)
+ return e->dest;
+ }
+ return NULL;
+}
+
+/* Helper function for make_edges.  Create a basic block with an
+   ABNORMAL_DISPATCHER internal call in it if needed, and
+ create abnormal edges from BBS to it and from it to FOR_BB
+ if COMPUTED_GOTO is false, otherwise factor the computed gotos. */
+
+static void
+handle_abnormal_edges (basic_block *dispatcher_bbs,
+ basic_block for_bb, int *bb_to_omp_idx,
+ auto_vec<basic_block> *bbs, bool computed_goto)
+{
+ basic_block *dispatcher = dispatcher_bbs + (computed_goto ? 1 : 0);
+ unsigned int idx = 0;
+ basic_block bb;
+ bool inner = false;
+
+ if (bb_to_omp_idx)
+ {
+      /* Each OMP region gets two dispatcher slots, [2*idx] for the
+         call dispatcher and [2*idx + 1] for the computed-goto one,
+         matching the 2 * count allocation in make_edges.  */
+      dispatcher = dispatcher_bbs + 2 * bb_to_omp_idx[for_bb->index]
+                   + (computed_goto ? 1 : 0);
+ if (bb_to_omp_idx[for_bb->index] != 0)
+ inner = true;
+ }
+
+  /* If the dispatcher has been created already, then there are basic
+     blocks with abnormal edges to it, so the edge made to FOR_BB at
+     the end of this function is all that is needed; otherwise the
+     dispatcher must be created first.  */
+ if (*dispatcher == NULL)
+ {
+ /* Check if there are any basic blocks that need to have
+ abnormal edges to this dispatcher. If there are none, return
+ early. */
+ if (bb_to_omp_idx == NULL)
+ {
+ if (bbs->is_empty ())
+ return;
+ }
+ else
+ {
+ FOR_EACH_VEC_ELT (*bbs, idx, bb)
+ if (bb_to_omp_idx[bb->index] == bb_to_omp_idx[for_bb->index])
+ break;
+ if (bb == NULL)
+ return;
+ }
+
+ /* Create the dispatcher bb. */
+ *dispatcher = create_basic_block (NULL, NULL, for_bb);
+ if (computed_goto)
+ {
+ /* Factor computed gotos into a common computed goto site. Also
+ record the location of that site so that we can un-factor the
+ gotos after we have converted back to normal form. */
+ gimple_stmt_iterator gsi = gsi_start_bb (*dispatcher);
+
+ /* Create the destination of the factored goto. Each original
+ computed goto will put its desired destination into this
+ variable and jump to the label we create immediately below. */
+ tree var = create_tmp_var (ptr_type_node, "gotovar");
+
+ /* Build a label for the new block which will contain the
+ factored computed goto. */
+ tree factored_label_decl
+ = create_artificial_label (UNKNOWN_LOCATION);
+ gimple factored_computed_goto_label
+ = gimple_build_label (factored_label_decl);
+ gsi_insert_after (&gsi, factored_computed_goto_label, GSI_NEW_STMT);
+
+ /* Build our new computed goto. */
+ gimple factored_computed_goto = gimple_build_goto (var);
+ gsi_insert_after (&gsi, factored_computed_goto, GSI_NEW_STMT);
+
+ FOR_EACH_VEC_ELT (*bbs, idx, bb)
+ {
+ if (bb_to_omp_idx
+ && bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
+ continue;
+
+ gsi = gsi_last_bb (bb);
+ gimple last = gsi_stmt (gsi);
+
+ gcc_assert (computed_goto_p (last));
+
+ /* Copy the original computed goto's destination into VAR. */
+ gimple assignment
+ = gimple_build_assign (var, gimple_goto_dest (last));
+ gsi_insert_before (&gsi, assignment, GSI_SAME_STMT);
+
+ edge e = make_edge (bb, *dispatcher, EDGE_FALLTHRU);
+ e->goto_locus = gimple_location (last);
+ gsi_remove (&gsi, true);
+ }
+ }
+ else
+ {
+ tree arg = inner ? boolean_true_node : boolean_false_node;
+ gimple g = gimple_build_call_internal (IFN_ABNORMAL_DISPATCHER,
+ 1, arg);
+ gimple_stmt_iterator gsi = gsi_after_labels (*dispatcher);
+ gsi_insert_after (&gsi, g, GSI_NEW_STMT);
+
+ /* Create predecessor edges of the dispatcher. */
+ FOR_EACH_VEC_ELT (*bbs, idx, bb)
+ {
+ if (bb_to_omp_idx
+ && bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
+ continue;
+ make_edge (bb, *dispatcher, EDGE_ABNORMAL);
+ }
+ }
+ }
+
+ make_edge (*dispatcher, for_bb, EDGE_ABNORMAL);
+}
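
To see what the factoring buys, consider a hypothetical function using
GCC's labels-as-values extension.  With G computed gotos and L
address-taken labels, the unfactored CFG needs G * L abnormal edges;
the factored form needs only G fallthru edges into the dispatcher
block plus L abnormal edges out of it.

    /* Sketch only: two computed gotos, two address-taken labels.  */
    int
    dispatch (int op)
    {
      static void *tab[] = { &&add, &&sub };
      int acc = 0;

      goto *tab[op & 1];           /* computed goto #1 */
     add:
      acc += 1;
      goto *tab[(op >> 1) & 1];    /* computed goto #2 */
     sub:
      acc -= 1;
      return acc;
    }

After handle_abnormal_edges, each goto *tab[...] becomes an assignment
gotovar = tab[...]; followed by a plain jump to the dispatcher block,
which holds the single remaining goto *gotovar with abnormal edges to
add and sub.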
+
/* Join all the blocks in the flowgraph. */
static void
{
basic_block bb;
struct omp_region *cur_region = NULL;
+ auto_vec<basic_block> ab_edge_goto;
+ auto_vec<basic_block> ab_edge_call;
+ int *bb_to_omp_idx = NULL;
+ int cur_omp_region_idx = 0;
/* Create an edge from entry to the first block with executable
statements in it. */
gimple last = last_stmt (bb);
bool fallthru;
+ if (bb_to_omp_idx)
+ bb_to_omp_idx[bb->index] = cur_omp_region_idx;
+
if (last)
{
enum gimple_code code = gimple_code (last);
switch (code)
{
case GIMPLE_GOTO:
- make_goto_expr_edges (bb);
+ if (make_goto_expr_edges (bb))
+ ab_edge_goto.safe_push (bb);
fallthru = false;
break;
case GIMPLE_RETURN:
make edges from this call site to all the nonlocal goto
handlers. */
if (stmt_can_make_abnormal_goto (last))
- make_abnormal_goto_edges (bb, true);
+ ab_edge_call.safe_push (bb);
/* If this statement has reachable exception handlers, then
create abnormal edges to them. */
/* BUILTIN_RETURN is really a return statement. */
if (gimple_call_builtin_p (last, BUILT_IN_RETURN))
- make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0), fallthru =
- false;
+ {
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
+ fallthru = false;
+ }
/* Some calls are known not to return. */
else
fallthru = !(gimple_call_flags (last) & ECF_NORETURN);
break;
CASE_GIMPLE_OMP:
- fallthru = make_gimple_omp_edges (bb, &cur_region);
+ fallthru = make_gimple_omp_edges (bb, &cur_region,
+ &cur_omp_region_idx);
+ if (cur_region && bb_to_omp_idx == NULL)
+ bb_to_omp_idx = XCNEWVEC (int, n_basic_blocks_for_fn (cfun));
break;
case GIMPLE_TRANSACTION:
make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
}
+  /* Computed gotos are hell to deal with, especially if there are
+     lots of them with a large number of destinations.  So we factor
+     them to a common computed goto location before we build the
+     edge list.  After we convert back to normal form, we will un-factor
+     the computed gotos since factoring introduces an unwanted jump.
+     For non-local gotos, and for abnormal edges from calls to calls
+     that return twice or to forced labels, factor the abnormal edges
+     too: have all abnormal edges from the calls go to a common
+     artificial basic block with an ABNORMAL_DISPATCHER internal call,
+     and abnormal edges from that basic block to all forced labels
+     and calls returning twice.
+     We do this per OpenMP structured block, because those regions
+     are guaranteed to be single entry single exit by the standard,
+     so entering or exiting such a region abnormally is not allowed;
+     thus computed gotos, non-local gotos and setjmp/longjmp calls
+     must not transfer control across SESE region boundaries.  */
+ if (!ab_edge_goto.is_empty () || !ab_edge_call.is_empty ())
+ {
+ gimple_stmt_iterator gsi;
+ basic_block dispatcher_bb_array[2] = { NULL, NULL };
+ basic_block *dispatcher_bbs = dispatcher_bb_array;
+ int count = n_basic_blocks_for_fn (cfun);
+
+ if (bb_to_omp_idx)
+ dispatcher_bbs = XCNEWVEC (basic_block, 2 * count);
+
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple label_stmt = gsi_stmt (gsi);
+ tree target;
+
+ if (gimple_code (label_stmt) != GIMPLE_LABEL)
+ break;
+
+ target = gimple_label_label (label_stmt);
+
+ /* Make an edge to every label block that has been marked as a
+ potential target for a computed goto or a non-local goto. */
+ if (FORCED_LABEL (target))
+ handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx,
+ &ab_edge_goto, true);
+ if (DECL_NONLOCAL (target))
+ {
+ handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx,
+ &ab_edge_call, false);
+ break;
+ }
+ }
+
+ if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
+ gsi_next_nondebug (&gsi);
+ if (!gsi_end_p (gsi))
+ {
+ /* Make an edge to every setjmp-like call. */
+ gimple call_stmt = gsi_stmt (gsi);
+ if (is_gimple_call (call_stmt)
+ && ((gimple_call_flags (call_stmt) & ECF_RETURNS_TWICE)
+ || gimple_call_builtin_p (call_stmt,
+ BUILT_IN_SETJMP_RECEIVER)))
+ handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx,
+ &ab_edge_call, false);
+ }
+ }
+
+ if (bb_to_omp_idx)
+ XDELETE (dispatcher_bbs);
+ }
+
+ XDELETE (bb_to_omp_idx);
+
free_omp_regions ();
/* Fold COND_EXPR_COND of each COND_EXPR. */
return (*ifun->cfg->x_label_to_block_map)[uid];
}
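
The abnormal-call side of the dispatcher scheme is easiest to see with
a returns-twice call.  A minimal sketch (hypothetical user code;
do_work is a stand-in for anything that may longjmp back):

    #include <setjmp.h>

    extern void do_work (void);
    static jmp_buf env;

    int
    guarded (void)
    {
      /* setjmp has ECF_RETURNS_TWICE, so under this patch its block
         receives one abnormal edge from the ABNORMAL_DISPATCHER block
         rather than an edge from every potential longjmp source.  */
      if (setjmp (env) != 0)
        return 1;                /* reached again via longjmp */
      do_work ();                /* may call longjmp (env, 1) */
      return 0;
    }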
-/* Create edges for an abnormal goto statement at block BB. If FOR_CALL
- is true, the source statement is a CALL_EXPR instead of a GOTO_EXPR. */
-
-void
-make_abnormal_goto_edges (basic_block bb, bool for_call)
-{
- basic_block target_bb;
- gimple_stmt_iterator gsi;
-
- FOR_EACH_BB_FN (target_bb, cfun)
- {
- for (gsi = gsi_start_bb (target_bb); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gimple label_stmt = gsi_stmt (gsi);
- tree target;
+/* Create edges for a goto statement at block BB. Returns true
+ if abnormal edges should be created. */
- if (gimple_code (label_stmt) != GIMPLE_LABEL)
- break;
-
- target = gimple_label_label (label_stmt);
-
- /* Make an edge to every label block that has been marked as a
- potential target for a computed goto or a non-local goto. */
- if ((FORCED_LABEL (target) && !for_call)
- || (DECL_NONLOCAL (target) && for_call))
- {
- make_edge (bb, target_bb, EDGE_ABNORMAL);
- break;
- }
- }
- if (!gsi_end_p (gsi)
- && is_gimple_debug (gsi_stmt (gsi)))
- gsi_next_nondebug (&gsi);
- if (!gsi_end_p (gsi))
- {
- /* Make an edge to every setjmp-like call. */
- gimple call_stmt = gsi_stmt (gsi);
- if (is_gimple_call (call_stmt)
- && (gimple_call_flags (call_stmt) & ECF_RETURNS_TWICE))
- make_edge (bb, target_bb, EDGE_ABNORMAL);
- }
- }
-}
-
-/* Create edges for a goto statement at block BB. */
-
-static void
+static bool
make_goto_expr_edges (basic_block bb)
{
gimple_stmt_iterator last = gsi_last_bb (bb);
edge e = make_edge (bb, label_bb, EDGE_FALLTHRU);
e->goto_locus = gimple_location (goto_t);
gsi_remove (&last, true);
- return;
+ return false;
}
/* A computed GOTO creates abnormal edges. */
- make_abnormal_goto_edges (bb, false);
+ return true;
}
/* Create edges for an asm statement with labels at block BB. */
FOR_EACH_IMM_USE_STMT (stmt, imm_iter, name)
{
+ /* Mark the block if we change the last stmt in it. */
+ if (cfgcleanup_altered_bbs
+ && stmt_ends_bb_p (stmt))
+ bitmap_set_bit (cfgcleanup_altered_bbs, gimple_bb (stmt)->index);
+
FOR_EACH_IMM_USE_ON_STMT (use, imm_iter)
{
replace_exp (use, val);
gimple orig_stmt = stmt;
size_t i;
- /* Mark the block if we changed the last stmt in it. */
- if (cfgcleanup_altered_bbs
- && stmt_ends_bb_p (stmt))
- bitmap_set_bit (cfgcleanup_altered_bbs, gimple_bb (stmt)->index);
-
/* FIXME. It shouldn't be required to keep TREE_CONSTANT
on ADDR_EXPRs up-to-date on GIMPLE. Propagation will
only change sth from non-invariant to invariant, and only
fprintf (dump_file, "Removing basic block %d\n", bb->index);
if (dump_flags & TDF_DETAILS)
{
- dump_bb (dump_file, bb, 0, dump_flags);
+ dump_bb (dump_file, bb, 0, TDF_BLOCKS);
fprintf (dump_file, "\n");
}
}
return true;
}
- if (handled_component_p (lhs))
+ if (handled_component_p (lhs)
+ || TREE_CODE (lhs) == MEM_REF
+ || TREE_CODE (lhs) == TARGET_MEM_REF)
res |= verify_types_in_gimple_reference (lhs, true);
/* Special codes we cannot handle via their class. */
return false;
}
- set_loop_copy (loop, loop);
-
/* In case the function is used for loop header copying (which is the primary
use), ensure that EXIT and its copy will be new latch and entry edges. */
if (loop->header == entry->dest)
{
copying_header = true;
- set_loop_copy (loop, loop_outer (loop));
if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
return false;
return false;
}
+ initialize_original_copy_tables ();
+
+ if (copying_header)
+ set_loop_copy (loop, loop_outer (loop));
+ else
+ set_loop_copy (loop, loop);
+
if (!region_copy)
{
region_copy = XNEWVEC (basic_block, n_region);
free_region_copy = true;
}
- initialize_original_copy_tables ();
-
/* Record blocks outside the region that are dominated by something
inside. */
if (update_dominance)
outer->num_nodes -= num_nodes;
loop0->num_nodes -= bbs.length () - num_nodes;
- if (saved_cfun->has_simduid_loops || saved_cfun->has_force_vect_loops)
+ if (saved_cfun->has_simduid_loops || saved_cfun->has_force_vectorize_loops)
{
struct loop *aloop;
for (i = 0; vec_safe_iterate (loops->larray, i, &aloop); i++)
d.to_context);
dest_cfun->has_simduid_loops = true;
}
- if (aloop->force_vect)
- dest_cfun->has_force_vect_loops = true;
+ if (aloop->force_vectorize)
+ dest_cfun->has_force_vectorize_loops = true;
}
}
/* Split all critical edges. */
-static unsigned int
+unsigned int
split_critical_edges (void)
{
basic_block bb;
GIMPLE_PASS, /* type */
"crited", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- false, /* has_gate */
true, /* has_execute */
TV_TREE_SPLIT_EDGES, /* tv_id */
PROP_cfg, /* properties_required */
{}
/* opt_pass methods: */
- unsigned int execute () { return split_critical_edges (); }
+ virtual unsigned int execute (function *) { return split_critical_edges (); }
opt_pass * clone () { return new pass_split_crit_edges (m_ctxt); }
}; // class pass_split_crit_edges
\f
-/* Emit return warnings. */
-
-static unsigned int
-execute_warn_function_return (void)
-{
- source_location location;
- gimple last;
- edge e;
- edge_iterator ei;
-
- if (!targetm.warn_func_return (cfun->decl))
- return 0;
-
- /* If we have a path to EXIT, then we do return. */
- if (TREE_THIS_VOLATILE (cfun->decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0)
- {
- location = UNKNOWN_LOCATION;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
- {
- last = last_stmt (e->src);
- if ((gimple_code (last) == GIMPLE_RETURN
- || gimple_call_builtin_p (last, BUILT_IN_RETURN))
- && (location = gimple_location (last)) != UNKNOWN_LOCATION)
- break;
- }
- if (location == UNKNOWN_LOCATION)
- location = cfun->function_end_locus;
- warning_at (location, 0, "%<noreturn%> function does return");
- }
-
- /* If we see "return;" in some basic block, then we do reach the end
- without returning a value. */
- else if (warn_return_type
- && !TREE_NO_WARNING (cfun->decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0
- && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (cfun->decl))))
- {
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
- {
- gimple last = last_stmt (e->src);
- if (gimple_code (last) == GIMPLE_RETURN
- && gimple_return_retval (last) == NULL
- && !gimple_no_warning_p (last))
- {
- location = gimple_location (last);
- if (location == UNKNOWN_LOCATION)
- location = cfun->function_end_locus;
- warning_at (location, OPT_Wreturn_type, "control reaches end of non-void function");
- TREE_NO_WARNING (cfun->decl) = 1;
- break;
- }
- }
- }
- return 0;
-}
-
-
/* Given a basic block B which ends with a conditional and has
precisely two successors, determine which of the edges is taken if
the conditional is true and which is taken if the conditional is
}
}
+/* Emit return warnings. */
+
namespace {
const pass_data pass_data_warn_function_return =
GIMPLE_PASS, /* type */
"*warn_function_return", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- false, /* has_gate */
true, /* has_execute */
TV_NONE, /* tv_id */
PROP_cfg, /* properties_required */
{}
/* opt_pass methods: */
- unsigned int execute () { return execute_warn_function_return (); }
+ virtual unsigned int execute (function *);
}; // class pass_warn_function_return
+unsigned int
+pass_warn_function_return::execute (function *fun)
+{
+ source_location location;
+ gimple last;
+ edge e;
+ edge_iterator ei;
+
+ if (!targetm.warn_func_return (fun->decl))
+ return 0;
+
+ /* If we have a path to EXIT, then we do return. */
+ if (TREE_THIS_VOLATILE (fun->decl)
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (fun)->preds) > 0)
+ {
+ location = UNKNOWN_LOCATION;
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (fun)->preds)
+ {
+ last = last_stmt (e->src);
+ if ((gimple_code (last) == GIMPLE_RETURN
+ || gimple_call_builtin_p (last, BUILT_IN_RETURN))
+ && (location = gimple_location (last)) != UNKNOWN_LOCATION)
+ break;
+ }
+ if (location == UNKNOWN_LOCATION)
+	location = fun->function_end_locus;
+ warning_at (location, 0, "%<noreturn%> function does return");
+ }
+
+ /* If we see "return;" in some basic block, then we do reach the end
+ without returning a value. */
+ else if (warn_return_type
+ && !TREE_NO_WARNING (fun->decl)
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (fun)->preds) > 0
+ && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fun->decl))))
+ {
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (fun)->preds)
+ {
+ gimple last = last_stmt (e->src);
+ if (gimple_code (last) == GIMPLE_RETURN
+ && gimple_return_retval (last) == NULL
+ && !gimple_no_warning_p (last))
+ {
+ location = gimple_location (last);
+ if (location == UNKNOWN_LOCATION)
+ location = fun->function_end_locus;
+	      warning_at (location, OPT_Wreturn_type,
+			  "control reaches end of non-void function");
+ TREE_NO_WARNING (fun->decl) = 1;
+ break;
+ }
+ }
+ }
+ return 0;
+}
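
For illustration, hypothetical inputs that trigger the two diagnostics
emitted above:

    /* -Wreturn-type: a path reaches EXIT without returning a value.
       warning: control reaches end of non-void function  */
    int
    pick (int x)
    {
      if (x)
        return 1;
    }

    /* A path to EXIT exists in a noreturn function.
       warning: 'noreturn' function does return  */
    __attribute__ ((noreturn)) void
    fatal (int x)
    {
      if (x)
        __builtin_exit (1);
    }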
+
} // anon namespace
gimple_opt_pass *
}
}
-static unsigned int
-run_warn_unused_result (void)
-{
- do_warn_unused_result (gimple_body (current_function_decl));
- return 0;
-}
-
-static bool
-gate_warn_unused_result (void)
-{
- return flag_warn_unused_result;
-}
-
namespace {
const pass_data pass_data_warn_unused_result =
GIMPLE_PASS, /* type */
"*warn_unused_result", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_gate */
true, /* has_execute */
TV_NONE, /* tv_id */
PROP_gimple_any, /* properties_required */
{}
/* opt_pass methods: */
- bool gate () { return gate_warn_unused_result (); }
- unsigned int execute () { return run_warn_unused_result (); }
+ virtual bool gate (function *) { return flag_warn_unused_result; }
+ virtual unsigned int execute (function *)
+ {
+ do_warn_unused_result (gimple_body (current_function_decl));
+ return 0;
+ }
}; // class pass_warn_unused_result
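
A hypothetical input for this pass; the gate is simply
flag_warn_unused_result, and do_warn_unused_result walks the pre-CFG
gimple body:

    __attribute__ ((warn_unused_result)) int must_check (void);

    void
    caller (void)
    {
      must_check ();   /* warning: ignoring return value of 'must_check' */
    }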
GIMPLE_PASS, /* type */
"*free_cfg_annotations", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- false, /* has_gate */
true, /* has_execute */
TV_NONE, /* tv_id */
PROP_cfg, /* properties_required */
/* opt_pass methods: */
opt_pass * clone () { return new pass_fixup_cfg (m_ctxt); }
- unsigned int execute () { return execute_fixup_cfg (); }
+ virtual unsigned int execute (function *) { return execute_fixup_cfg (); }
}; // class pass_fixup_cfg
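
Taken together, the pass-manager changes in this patch follow a single
pattern: the has_gate flag and the non-virtual gate ()/execute ()
methods are replaced by virtual hooks taking the function being
compiled.  A minimal sketch of a pass under the new interface
(hypothetical pass; the pass_data field list is assumed from the
instances shown above):

    namespace {

    const pass_data pass_data_example =
    {
      GIMPLE_PASS, /* type */
      "example", /* name */
      OPTGROUP_NONE, /* optinfo_flags */
      true, /* has_execute */
      TV_NONE, /* tv_id */
      PROP_cfg, /* properties_required */
      0, /* properties_provided */
      0, /* properties_destroyed */
      0, /* todo_flags_start */
      0, /* todo_flags_finish */
    };

    class pass_example : public gimple_opt_pass
    {
    public:
      pass_example (gcc::context *ctxt)
        : gimple_opt_pass (pass_data_example, ctxt)
      {}

      /* opt_pass methods: an always-true gate is expressed by not
         overriding gate () at all; both hooks receive the function
         instead of relying on the global cfun.  */
      virtual bool gate (function *) { return optimize != 0; }
      virtual unsigned int execute (function *fun)
      {
        /* ... work on FUN ... */
        return 0;
      }
    }; // class pass_example

    } // anon namespace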