/* Control flow functions for trees.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
- 2010, 2011, 2012 Free Software Foundation, Inc.
+ Copyright (C) 2001-2013 Free Software Foundation, Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "hash-table.h"
#include "tm.h"
#include "tree.h"
+#include "trans-mem.h"
+#include "stor-layout.h"
+#include "print-tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "flags.h"
#include "function.h"
-#include "ggc.h"
#include "gimple-pretty-print.h"
-#include "tree-flow.h"
+#include "pointer-set.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "gimple-fold.h"
+#include "tree-eh.h"
+#include "gimple-expr.h"
+#include "is-a.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "gimplify-me.h"
+#include "gimple-walk.h"
+#include "gimple-ssa.h"
+#include "cgraph.h"
+#include "tree-cfg.h"
+#include "tree-phinodes.h"
+#include "ssa-iterators.h"
+#include "stringpool.h"
+#include "tree-ssanames.h"
+#include "tree-ssa-loop-manip.h"
+#include "tree-ssa-loop-niter.h"
+#include "tree-into-ssa.h"
+#include "expr.h"
+#include "tree-dfa.h"
+#include "tree-ssa.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "diagnostic-core.h"
#include "cfgloop.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
-#include "pointer-set.h"
#include "tree-inline.h"
#include "target.h"
+#include "tree-ssa-live.h"
+#include "omp-low.h"
+#include "tree-cfgcleanup.h"
/* This file contains functions for building the Control Flow Graph (CFG)
for a function tree. */
location_t locus;
int discriminator;
};
-static htab_t discriminator_per_locus;
+
+/* Hashtable helpers. */
+
+struct locus_discrim_hasher : typed_free_remove <locus_discrim_map>
+{
+ typedef locus_discrim_map value_type;
+ typedef locus_discrim_map compare_type;
+ static inline hashval_t hash (const value_type *);
+ static inline bool equal (const value_type *, const compare_type *);
+};
+
+/* Trivial hash function for a location_t. ITEM is a pointer to
+ a hash table entry that maps a location_t to a discriminator. */
+
+inline hashval_t
+locus_discrim_hasher::hash (const value_type *item)
+{
+ return LOCATION_LINE (item->locus);
+}
+
+/* Equality function for the locus-to-discriminator map. A and B
+ point to the two hash table entries to compare. */
+
+inline bool
+locus_discrim_hasher::equal (const value_type *a, const compare_type *b)
+{
+ return LOCATION_LINE (a->locus) == LOCATION_LINE (b->locus);
+}
+
+static hash_table <locus_discrim_hasher> discriminator_per_locus;
/* Basic blocks and flowgraphs. */
static void make_blocks (gimple_seq);
/* Edges. */
static void make_edges (void);
+static void assign_discriminators (void);
static void make_cond_expr_edges (basic_block);
static void make_gimple_switch_edges (basic_block);
static void make_goto_expr_edges (basic_block);
static void make_gimple_asm_edges (basic_block);
-static unsigned int locus_map_hash (const void *);
-static int locus_map_eq (const void *, const void *);
-static void assign_discriminator (location_t, basic_block);
static edge gimple_redirect_edge_and_branch (edge, basic_block);
static edge gimple_try_redirect_by_replacing_jump (edge, basic_block);
static unsigned int split_critical_edges (void);
static inline bool stmt_starts_bb_p (gimple, gimple);
static int gimple_verify_flow_info (void);
static void gimple_make_forwarder_block (edge);
-static void gimple_cfg2vcg (FILE *);
static gimple first_non_label_stmt (basic_block);
static bool verify_gimple_transaction (gimple);
{
/* Initialize the basic block array. */
init_flow (fn);
- profile_status_for_function (fn) = PROFILE_ABSENT;
- n_basic_blocks_for_function (fn) = NUM_FIXED_BLOCKS;
- last_basic_block_for_function (fn) = NUM_FIXED_BLOCKS;
- basic_block_info_for_function (fn)
- = VEC_alloc (basic_block, gc, initial_cfg_capacity);
- VEC_safe_grow_cleared (basic_block, gc,
- basic_block_info_for_function (fn),
+ profile_status_for_fn (fn) = PROFILE_ABSENT;
+ n_basic_blocks_for_fn (fn) = NUM_FIXED_BLOCKS;
+ last_basic_block_for_fn (fn) = NUM_FIXED_BLOCKS;
+ vec_alloc (basic_block_info_for_fn (fn), initial_cfg_capacity);
+ vec_safe_grow_cleared (basic_block_info_for_fn (fn),
initial_cfg_capacity);
/* Build a mapping of labels to their associated blocks. */
- label_to_block_map_for_function (fn)
- = VEC_alloc (basic_block, gc, initial_cfg_capacity);
- VEC_safe_grow_cleared (basic_block, gc,
- label_to_block_map_for_function (fn),
+ vec_alloc (label_to_block_map_for_fn (fn), initial_cfg_capacity);
+ vec_safe_grow_cleared (label_to_block_map_for_fn (fn),
initial_cfg_capacity);
- SET_BASIC_BLOCK_FOR_FUNCTION (fn, ENTRY_BLOCK,
- ENTRY_BLOCK_PTR_FOR_FUNCTION (fn));
- SET_BASIC_BLOCK_FOR_FUNCTION (fn, EXIT_BLOCK,
- EXIT_BLOCK_PTR_FOR_FUNCTION (fn));
+ SET_BASIC_BLOCK_FOR_FN (fn, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (fn));
+ SET_BASIC_BLOCK_FOR_FN (fn, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (fn));
- ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->next_bb
- = EXIT_BLOCK_PTR_FOR_FUNCTION (fn);
- EXIT_BLOCK_PTR_FOR_FUNCTION (fn)->prev_bb
- = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn);
+ ENTRY_BLOCK_PTR_FOR_FN (fn)->next_bb
+ = EXIT_BLOCK_PTR_FOR_FN (fn);
+ EXIT_BLOCK_PTR_FOR_FN (fn)->prev_bb
+ = ENTRY_BLOCK_PTR_FOR_FN (fn);
}
void
factor_computed_gotos ();
/* Make sure there is always at least one block, even if it's empty. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
- create_empty_bb (ENTRY_BLOCK_PTR);
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
+ create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* Adjust the size of the array. */
- if (VEC_length (basic_block, basic_block_info) < (size_t) n_basic_blocks)
- VEC_safe_grow_cleared (basic_block, gc, basic_block_info, n_basic_blocks);
+ if (basic_block_info_for_fn (cfun)->length ()
+ < (size_t) n_basic_blocks_for_fn (cfun))
+ vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
+ n_basic_blocks_for_fn (cfun));
/* To speed up statement iterator walks, we first purge dead labels. */
cleanup_dead_labels ();
group_case_labels ();
/* Create the edges of the flowgraph. */
- discriminator_per_locus = htab_create (13, locus_map_hash, locus_map_eq,
- free);
+ discriminator_per_locus.create (13);
make_edges ();
+ assign_discriminators ();
cleanup_dead_labels ();
- htab_delete (discriminator_per_locus);
+ discriminator_per_locus.dispose ();
+}
- /* Debugging dumps. */
- /* Write the flowgraph to a VCG file. */
- {
- int local_dump_flags;
- FILE *vcg_file = dump_begin (TDI_vcg, &local_dump_flags);
- if (vcg_file)
- {
- gimple_cfg2vcg (vcg_file);
- dump_end (TDI_vcg, vcg_file);
- }
- }
+/* Search for ANNOTATE call with annot_expr_ivdep_kind; if found, remove
+ it and set loop->safelen to INT_MAX. We assume that the annotation
+ comes immediately before the condition. */
+
+static void
+replace_loop_annotate ()
+{
+ struct loop *loop;
+ basic_block bb;
+ gimple_stmt_iterator gsi;
+ gimple stmt;
+
+ FOR_EACH_LOOP (loop, 0)
+ {
+ gsi = gsi_last_bb (loop->header);
+ stmt = gsi_stmt (gsi);
+ if (stmt && gimple_code (stmt) == GIMPLE_COND)
+ {
+ gsi_prev_nondebug (&gsi);
+ if (gsi_end_p (gsi))
+ continue;
+ stmt = gsi_stmt (gsi);
+ if (gimple_code (stmt) != GIMPLE_CALL)
+ continue;
+ if (!gimple_call_internal_p (stmt)
+ || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
+ continue;
+ if ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1))
+ != annot_expr_ivdep_kind)
+ continue;
+ stmt = gimple_build_assign (gimple_call_lhs (stmt),
+ gimple_call_arg (stmt, 0));
+ gsi_replace (&gsi, stmt, true);
+ loop->safelen = INT_MAX;
+ }
+ }
+
+ /* Remove IFN_ANNOTATE. Safeguard for the case loop->latch == NULL. */
+ FOR_EACH_BB (bb)
+ {
+ gsi = gsi_last_bb (bb);
+ stmt = gsi_stmt (gsi);
+ if (stmt && gimple_code (stmt) == GIMPLE_COND)
+ gsi_prev_nondebug (&gsi);
+ if (gsi_end_p (gsi))
+ continue;
+ stmt = gsi_stmt (gsi);
+ if (gimple_code (stmt) != GIMPLE_CALL)
+ continue;
+ if (!gimple_call_internal_p (stmt)
+ || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
+ continue;
+ if ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1))
+ != annot_expr_ivdep_kind)
+ continue;
+ warning_at (gimple_location (stmt), 0, "ignoring %<GCC ivdep%> "
+ "annotation");
+ stmt = gimple_build_assign (gimple_call_lhs (stmt),
+ gimple_call_arg (stmt, 0));
+ gsi_replace (&gsi, stmt, true);
+ }
}
+
static unsigned int
execute_build_cfg (void)
{
fprintf (dump_file, "Scope blocks:\n");
dump_scope_blocks (dump_file, dump_flags);
}
+ cleanup_tree_cfg ();
+ loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
+ replace_loop_annotate ();
return 0;
}
-struct gimple_opt_pass pass_build_cfg =
-{
- {
- GIMPLE_PASS,
- "cfg", /* name */
- NULL, /* gate */
- execute_build_cfg, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_TREE_CFG, /* tv_id */
- PROP_gimple_leh, /* properties_required */
- PROP_cfg, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_verify_stmts | TODO_cleanup_cfg /* todo_flags_finish */
- }
+namespace {
+
+const pass_data pass_data_build_cfg =
+{
+ GIMPLE_PASS, /* type */
+ "cfg", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ false, /* has_gate */
+ true, /* has_execute */
+ TV_TREE_CFG, /* tv_id */
+ PROP_gimple_leh, /* properties_required */
+ ( PROP_cfg | PROP_loops ), /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_verify_stmts, /* todo_flags_finish */
};
+class pass_build_cfg : public gimple_opt_pass
+{
+public:
+ pass_build_cfg (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_build_cfg, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ unsigned int execute () { return execute_build_cfg (); }
+
+}; // class pass_build_cfg
+
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_build_cfg (gcc::context *ctxt)
+{
+ return new pass_build_cfg (ctxt);
+}
+
/* Return true if T is a computed goto. */
&& TREE_CODE (gimple_goto_dest (t)) != LABEL_DECL);
}
+/* Returns true for edge E where e->src ends with a GIMPLE_COND and
+ the other edge points to a bb with just __builtin_unreachable ().
+ I.e. return true for C->M edge in:
+ <bb C>:
+ ...
+ if (something)
+ goto <bb N>;
+ else
+ goto <bb M>;
+ <bb N>:
+ __builtin_unreachable ();
+ <bb M>: */
+
+bool
+assert_unreachable_fallthru_edge_p (edge e)
+{
+ basic_block pred_bb = e->src;
+ gimple last = last_stmt (pred_bb);
+ if (last && gimple_code (last) == GIMPLE_COND)
+ {
+ basic_block other_bb = EDGE_SUCC (pred_bb, 0)->dest;
+ if (other_bb == e->dest)
+ other_bb = EDGE_SUCC (pred_bb, 1)->dest;
+ if (EDGE_COUNT (other_bb->succs) == 0)
+ {
+ gimple_stmt_iterator gsi = gsi_after_labels (other_bb);
+ gimple stmt;
+
+ if (gsi_end_p (gsi))
+ return false;
+ stmt = gsi_stmt (gsi);
+ if (is_gimple_debug (stmt))
+ {
+ gsi_next_nondebug (&gsi);
+ if (gsi_end_p (gsi))
+ return false;
+ stmt = gsi_stmt (gsi);
+ }
+ return gimple_call_builtin_p (stmt, BUILT_IN_UNREACHABLE);
+ }
+ }
+ return false;
+}
+
/* Search the CFG for any computed gotos. If found, factor them to a
common computed goto site. Also record the location of that site so
gimple stmt = NULL;
bool start_new_block = true;
bool first_stmt_of_seq = true;
- basic_block bb = ENTRY_BLOCK_PTR;
+ basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
while (!gsi_end_p (i))
{
link_block (bb, after);
/* Grow the basic block array if needed. */
- if ((size_t) last_basic_block == VEC_length (basic_block, basic_block_info))
+ if ((size_t) last_basic_block == basic_block_info_for_fn (cfun)->length ())
{
size_t new_size = last_basic_block + (last_basic_block + 3) / 4;
- VEC_safe_grow_cleared (basic_block, gc, basic_block_info, new_size);
+ vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
}
/* Add the newly created block to the array. */
- SET_BASIC_BLOCK (last_basic_block, bb);
+ SET_BASIC_BLOCK_FOR_FN (cfun, last_basic_block, bb);
- n_basic_blocks++;
+ n_basic_blocks_for_fn (cfun)++;
last_basic_block++;
return bb;
/* Create an edge from entry to the first block with executable
statements in it. */
- make_edge (ENTRY_BLOCK_PTR, BASIC_BLOCK (NUM_FIXED_BLOCKS), EDGE_FALLTHRU);
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS),
+ EDGE_FALLTHRU);
/* Traverse the basic block array placing edges. */
FOR_EACH_BB (bb)
fallthru = false;
break;
case GIMPLE_RETURN:
- make_edge (bb, EXIT_BLOCK_PTR, 0);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
fallthru = false;
break;
case GIMPLE_COND:
/* BUILTIN_RETURN is really a return statement. */
if (gimple_call_builtin_p (last, BUILT_IN_RETURN))
- make_edge (bb, EXIT_BLOCK_PTR, 0), fallthru = false;
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0), fallthru =
+ false;
/* Some calls are known not to return. */
else
fallthru = !(gimple_call_flags (last) & ECF_NORETURN);
fallthru = true;
break;
- case GIMPLE_OMP_PARALLEL:
- case GIMPLE_OMP_TASK:
- case GIMPLE_OMP_FOR:
- case GIMPLE_OMP_SINGLE:
- case GIMPLE_OMP_MASTER:
- case GIMPLE_OMP_ORDERED:
- case GIMPLE_OMP_CRITICAL:
- case GIMPLE_OMP_SECTION:
- cur_region = new_omp_region (bb, code, cur_region);
- fallthru = true;
- break;
-
- case GIMPLE_OMP_SECTIONS:
- cur_region = new_omp_region (bb, code, cur_region);
- fallthru = true;
- break;
-
- case GIMPLE_OMP_SECTIONS_SWITCH:
- fallthru = false;
- break;
-
- case GIMPLE_OMP_ATOMIC_LOAD:
- case GIMPLE_OMP_ATOMIC_STORE:
- fallthru = true;
- break;
-
- case GIMPLE_OMP_RETURN:
- /* In the case of a GIMPLE_OMP_SECTION, the edge will go
- somewhere other than the next block. This will be
- created later. */
- cur_region->exit = bb;
- fallthru = cur_region->type != GIMPLE_OMP_SECTION;
- cur_region = cur_region->outer;
- break;
-
- case GIMPLE_OMP_CONTINUE:
- cur_region->cont = bb;
- switch (cur_region->type)
- {
- case GIMPLE_OMP_FOR:
- /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
- succs edges as abnormal to prevent splitting
- them. */
- single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
- /* Make the loopback edge. */
- make_edge (bb, single_succ (cur_region->entry),
- EDGE_ABNORMAL);
-
- /* Create an edge from GIMPLE_OMP_FOR to exit, which
- corresponds to the case that the body of the loop
- is not executed at all. */
- make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
- make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
- fallthru = false;
- break;
-
- case GIMPLE_OMP_SECTIONS:
- /* Wire up the edges into and out of the nested sections. */
- {
- basic_block switch_bb = single_succ (cur_region->entry);
-
- struct omp_region *i;
- for (i = cur_region->inner; i ; i = i->next)
- {
- gcc_assert (i->type == GIMPLE_OMP_SECTION);
- make_edge (switch_bb, i->entry, 0);
- make_edge (i->exit, bb, EDGE_FALLTHRU);
- }
-
- /* Make the loopback edge to the block with
- GIMPLE_OMP_SECTIONS_SWITCH. */
- make_edge (bb, switch_bb, 0);
-
- /* Make the edge from the switch to exit. */
- make_edge (switch_bb, bb->next_bb, 0);
- fallthru = false;
- }
- break;
-
- default:
- gcc_unreachable ();
- }
+ CASE_GIMPLE_OMP:
+ fallthru = make_gimple_omp_edges (bb, &cur_region);
break;
case GIMPLE_TRANSACTION:
{
tree abort_label = gimple_transaction_label (last);
if (abort_label)
- make_edge (bb, label_to_block (abort_label), 0);
+ make_edge (bb, label_to_block (abort_label), EDGE_TM_ABORT);
fallthru = true;
}
break;
fallthru = true;
if (fallthru)
- {
- make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
- if (last)
- assign_discriminator (gimple_location (last), bb->next_bb);
- }
+ make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
}
- if (root_omp_region)
- free_omp_regions ();
+ free_omp_regions ();
/* Fold COND_EXPR_COND of each COND_EXPR. */
fold_cond_expr_cond ();
}
-/* Trivial hash function for a location_t. ITEM is a pointer to
- a hash table entry that maps a location_t to a discriminator. */
-
-static unsigned int
-locus_map_hash (const void *item)
-{
- return ((const struct locus_discrim_map *) item)->locus;
-}
-
-/* Equality function for the locus-to-discriminator map. VA and VB
- point to the two hash table entries to compare. */
-
-static int
-locus_map_eq (const void *va, const void *vb)
-{
- const struct locus_discrim_map *a = (const struct locus_discrim_map *) va;
- const struct locus_discrim_map *b = (const struct locus_discrim_map *) vb;
- return a->locus == b->locus;
-}
-
/* Find the next available discriminator value for LOCUS. The
discriminator distinguishes among several basic blocks that
share a common locus, allowing for more accurate sample-based
item.locus = locus;
item.discriminator = 0;
- slot = (struct locus_discrim_map **)
- htab_find_slot_with_hash (discriminator_per_locus, (void *) &item,
- (hashval_t) locus, INSERT);
+ slot = discriminator_per_locus.find_slot_with_hash (
+ &item, LOCATION_LINE (locus), INSERT);
gcc_assert (slot);
if (*slot == HTAB_EMPTY_ENTRY)
{
&& filename_cmp (from.file, to.file) == 0);
}
-/* Assign a unique discriminator value to block BB if it begins at the same
- LOCUS as its predecessor block. */
+/* Assign discriminators to each basic block. */
static void
-assign_discriminator (location_t locus, basic_block bb)
+assign_discriminators (void)
{
- gimple first_in_to_bb, last_in_to_bb;
+ basic_block bb;
- if (locus == 0 || bb->discriminator != 0)
- return;
+ FOR_EACH_BB (bb)
+ {
+ edge e;
+ edge_iterator ei;
+ gimple last = last_stmt (bb);
+ location_t locus = last ? gimple_location (last) : UNKNOWN_LOCATION;
+
+ if (locus == UNKNOWN_LOCATION)
+ continue;
- first_in_to_bb = first_non_label_stmt (bb);
- last_in_to_bb = last_stmt (bb);
- if ((first_in_to_bb && same_line_p (locus, gimple_location (first_in_to_bb)))
- || (last_in_to_bb && same_line_p (locus, gimple_location (last_in_to_bb))))
- bb->discriminator = next_discriminator_for_locus (locus);
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ gimple first = first_non_label_stmt (e->dest);
+ gimple last = last_stmt (e->dest);
+ if ((first && same_line_p (locus, gimple_location (first)))
+ || (last && same_line_p (locus, gimple_location (last))))
+ {
+ if (e->dest->discriminator != 0 && bb->discriminator == 0)
+ bb->discriminator = next_discriminator_for_locus (locus);
+ else
+ e->dest->discriminator = next_discriminator_for_locus (locus);
+ }
+ }
+ }
}
/* Create the edges for a GIMPLE_COND starting at block BB. */
basic_block then_bb, else_bb;
tree then_label, else_label;
edge e;
- location_t entry_locus;
gcc_assert (entry);
gcc_assert (gimple_code (entry) == GIMPLE_COND);
- entry_locus = gimple_location (entry);
-
/* Entry basic blocks for each component. */
then_label = gimple_cond_true_label (entry);
else_label = gimple_cond_false_label (entry);
else_stmt = first_stmt (else_bb);
e = make_edge (bb, then_bb, EDGE_TRUE_VALUE);
- assign_discriminator (entry_locus, then_bb);
e->goto_locus = gimple_location (then_stmt);
- if (e->goto_locus)
- e->goto_block = gimple_block (then_stmt);
e = make_edge (bb, else_bb, EDGE_FALSE_VALUE);
if (e)
- {
- assign_discriminator (entry_locus, else_bb);
- e->goto_locus = gimple_location (else_stmt);
- if (e->goto_locus)
- e->goto_block = gimple_block (else_stmt);
- }
+ e->goto_locus = gimple_location (else_stmt);
/* We do not need the labels anymore. */
gimple_cond_set_true_label (entry, NULL_TREE);
edge_to_cases = NULL;
EXECUTE_IF_SET_IN_BITMAP (touched_switch_bbs, 0, i, bi)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
if (bb)
{
gimple stmt = last_stmt (bb);
make_gimple_switch_edges (basic_block bb)
{
gimple entry = last_stmt (bb);
- location_t entry_locus;
size_t i, n;
- entry_locus = gimple_location (entry);
-
n = gimple_switch_num_labels (entry);
for (i = 0; i < n; ++i)
tree lab = CASE_LABEL (gimple_switch_label (entry, i));
basic_block label_bb = label_to_block (lab);
make_edge (bb, label_bb, 0);
- assign_discriminator (entry_locus, label_bb);
}
}
and undefined variable warnings quite right. */
if (seen_error () && uid < 0)
{
- gimple_stmt_iterator gsi = gsi_start_bb (BASIC_BLOCK (NUM_FIXED_BLOCKS));
+ gimple_stmt_iterator gsi =
+ gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS));
gimple stmt;
stmt = gimple_build_label (dest);
gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
uid = LABEL_DECL_UID (dest);
}
- if (VEC_length (basic_block, ifun->cfg->x_label_to_block_map)
- <= (unsigned int) uid)
+ if (vec_safe_length (ifun->cfg->x_label_to_block_map) <= (unsigned int) uid)
return NULL;
- return VEC_index (basic_block, ifun->cfg->x_label_to_block_map, uid);
+ return (*ifun->cfg->x_label_to_block_map)[uid];
}
/* Create edges for an abnormal goto statement at block BB. If FOR_CALL
gimple_stmt_iterator gsi;
FOR_EACH_BB (target_bb)
- for (gsi = gsi_start_bb (target_bb); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gimple label_stmt = gsi_stmt (gsi);
- tree target;
+ {
+ for (gsi = gsi_start_bb (target_bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple label_stmt = gsi_stmt (gsi);
+ tree target;
- if (gimple_code (label_stmt) != GIMPLE_LABEL)
- break;
+ if (gimple_code (label_stmt) != GIMPLE_LABEL)
+ break;
- target = gimple_label_label (label_stmt);
+ target = gimple_label_label (label_stmt);
- /* Make an edge to every label block that has been marked as a
- potential target for a computed goto or a non-local goto. */
- if ((FORCED_LABEL (target) && !for_call)
- || (DECL_NONLOCAL (target) && for_call))
- {
+ /* Make an edge to every label block that has been marked as a
+ potential target for a computed goto or a non-local goto. */
+ if ((FORCED_LABEL (target) && !for_call)
+ || (DECL_NONLOCAL (target) && for_call))
+ {
+ make_edge (bb, target_bb, EDGE_ABNORMAL);
+ break;
+ }
+ }
+ if (!gsi_end_p (gsi)
+ && is_gimple_debug (gsi_stmt (gsi)))
+ gsi_next_nondebug (&gsi);
+ if (!gsi_end_p (gsi))
+ {
+ /* Make an edge to every setjmp-like call. */
+ gimple call_stmt = gsi_stmt (gsi);
+ if (is_gimple_call (call_stmt)
+ && (gimple_call_flags (call_stmt) & ECF_RETURNS_TWICE))
make_edge (bb, target_bb, EDGE_ABNORMAL);
- break;
- }
- }
+ }
+ }
}
/* Create edges for a goto statement at block BB. */
basic_block label_bb = label_to_block (dest);
edge e = make_edge (bb, label_bb, EDGE_FALLTHRU);
e->goto_locus = gimple_location (goto_t);
- assign_discriminator (e->goto_locus, label_bb);
- if (e->goto_locus)
- e->goto_block = gimple_block (goto_t);
gsi_remove (&last, true);
return;
}
make_gimple_asm_edges (basic_block bb)
{
gimple stmt = last_stmt (bb);
- location_t stmt_loc = gimple_location (stmt);
int i, n = gimple_asm_nlabels (stmt);
for (i = 0; i < n; ++i)
tree label = TREE_VALUE (gimple_asm_label_op (stmt, i));
basic_block label_bb = label_to_block (label);
make_edge (bb, label_bb, 0);
- assign_discriminator (stmt_loc, label_bb);
}
}
if (cfun->eh == NULL)
return;
- for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
+ for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
if (lp && lp->post_landing_pad)
{
lab = main_block_label (lp->post_landing_pad);
int old_size = gimple_switch_num_labels (stmt);
int i, j, new_size = old_size;
basic_block default_bb = NULL;
- bool has_default;
- /* The default label is always the first case in a switch
- statement after gimplification if it was not optimized
- away */
- if (!CASE_LOW (gimple_switch_default_label (stmt))
- && !CASE_HIGH (gimple_switch_default_label (stmt)))
- {
- tree default_case = gimple_switch_default_label (stmt);
- default_bb = label_to_block (CASE_LABEL (default_case));
- has_default = true;
- }
- else
- has_default = false;
+ default_bb = label_to_block (CASE_LABEL (gimple_switch_default_label (stmt)));
/* Look for possible opportunities to merge cases. */
- if (has_default)
- i = 1;
- else
- i = 0;
+ i = 1;
while (i < old_size)
{
tree base_case, base_high;
{
tree merge_case = gimple_switch_label (stmt, i);
basic_block merge_bb = label_to_block (CASE_LABEL (merge_case));
- double_int bhp1 = double_int_add (tree_to_double_int (base_high),
- double_int_one);
+ double_int bhp1 = tree_to_double_int (base_high) + double_int_one;
/* Merge the cases if they jump to the same place,
and their ranges are consecutive. */
if (merge_bb == base_bb
- && double_int_equal_p (tree_to_double_int (CASE_LOW (merge_case)),
- bhp1))
+ && tree_to_double_int (CASE_LOW (merge_case)) == bhp1)
{
base_high = CASE_HIGH (merge_case) ?
CASE_HIGH (merge_case) : CASE_LOW (merge_case);
if (!single_pred_p (b))
return false;
- if (b == EXIT_BLOCK_PTR)
+ if (b == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
/* If A ends by a statement causing exceptions or something similar, we
return true;
}
-/* Return true if the var whose chain of uses starts at PTR has no
- nondebug uses. */
-bool
-has_zero_uses_1 (const ssa_use_operand_t *head)
-{
- const ssa_use_operand_t *ptr;
-
- for (ptr = head->next; ptr != head; ptr = ptr->next)
- if (!is_gimple_debug (USE_STMT (ptr)))
- return false;
-
- return true;
-}
-
-/* Return true if the var whose chain of uses starts at PTR has a
- single nondebug use. Set USE_P and STMT to that single nondebug
- use, if so, or to NULL otherwise. */
-bool
-single_imm_use_1 (const ssa_use_operand_t *head,
- use_operand_p *use_p, gimple *stmt)
-{
- ssa_use_operand_t *ptr, *single_use = 0;
-
- for (ptr = head->next; ptr != head; ptr = ptr->next)
- if (!is_gimple_debug (USE_STMT (ptr)))
- {
- if (single_use)
- {
- single_use = NULL;
- break;
- }
- single_use = ptr;
- }
-
- if (use_p)
- *use_p = single_use;
-
- if (stmt)
- *stmt = single_use ? single_use->loc.stmt : NULL;
-
- return !!single_use;
-}
-
/* Replaces all uses of NAME by VAL. */
void
/* This can only occur for virtual operands, since
for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
would prevent replacement. */
- gcc_checking_assert (!is_gimple_reg (name));
+ gcc_checking_assert (virtual_operand_p (name));
SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1;
}
}
if (current_loops)
{
struct loop *loop;
- loop_iterator li;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
substitute_in_loop_info (loop, name, val);
}
gimple phi = gsi_stmt (psi);
tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);
gimple copy;
- bool may_replace_uses = !is_gimple_reg (def)
- || may_propagate_copy (def, use);
+ bool may_replace_uses = (virtual_operand_p (def)
+ || may_propagate_copy (def, use));
/* In case we maintain loop closed ssa form, do not propagate arguments
of loop exit phi nodes. */
if (current_loops
&& loops_state_satisfies_p (LOOP_CLOSED_SSA)
- && is_gimple_reg (def)
+ && !virtual_operand_p (def)
&& TREE_CODE (use) == SSA_NAME
&& a->loop_father != b->loop_father)
may_replace_uses = false;
if (!may_replace_uses)
{
- gcc_assert (is_gimple_reg (def));
+ gcc_assert (!virtual_operand_p (def));
/* Note that just emitting the copies is fine -- there is no problem
with ordering of phi nodes. This is because A is the single
/* If we deal with a PHI for virtual operands, we can simply
propagate these without fussing with folding or updating
the stmt. */
- if (!is_gimple_reg (def))
+ if (virtual_operand_p (def))
{
imm_use_iterator iter;
use_operand_p use_p;
basic_block
gimple_debug_bb_n (int n)
{
- gimple_debug_bb (BASIC_BLOCK (n));
- return BASIC_BLOCK (n);
+ gimple_debug_bb (BASIC_BLOCK_FOR_FN (cfun, n));
+ return BASIC_BLOCK_FOR_FN (cfun, n);
}
/* Dump the CFG on stderr.
FLAGS are the same used by the tree dumping functions
- (see TDF_* in tree-pass.h). */
+ (see TDF_* in dumpfile.h). */
void
gimple_debug_cfg (int flags)
{
dump_function_header (file, current_function_decl, flags);
fprintf (file, ";; \n%d basic blocks, %d edges, last basic block %d.\n\n",
- n_basic_blocks, n_edges, last_basic_block);
+ n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
+ last_basic_block);
brief_dump_cfg (file, flags | TDF_COMMENT);
fprintf (file, "\n");
fprintf (file, fmt_str, "", " instances ", "used ");
fprintf (file, "---------------------------------------------------------\n");
- size = n_basic_blocks * sizeof (struct basic_block_def);
+ size = n_basic_blocks_for_fn (cfun) * sizeof (struct basic_block_def);
total += size;
- fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks,
+ fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks_for_fn (cfun),
SCALE (size), LABEL (size));
num_edges = 0;
dump_cfg_stats (stderr);
}
-
-/* Dump the flowgraph to a .vcg FILE. */
-
-static void
-gimple_cfg2vcg (FILE *file)
-{
- edge e;
- edge_iterator ei;
- basic_block bb;
- const char *funcname = current_function_name ();
-
- /* Write the file header. */
- fprintf (file, "graph: { title: \"%s\"\n", funcname);
- fprintf (file, "node: { title: \"ENTRY\" label: \"ENTRY\" }\n");
- fprintf (file, "node: { title: \"EXIT\" label: \"EXIT\" }\n");
-
- /* Write blocks and edges. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
- {
- fprintf (file, "edge: { sourcename: \"ENTRY\" targetname: \"%d\"",
- e->dest->index);
-
- if (e->flags & EDGE_FAKE)
- fprintf (file, " linestyle: dotted priority: 10");
- else
- fprintf (file, " linestyle: solid priority: 100");
-
- fprintf (file, " }\n");
- }
- fputc ('\n', file);
-
- FOR_EACH_BB (bb)
- {
- enum gimple_code head_code, end_code;
- const char *head_name, *end_name;
- int head_line = 0;
- int end_line = 0;
- gimple first = first_stmt (bb);
- gimple last = last_stmt (bb);
-
- if (first)
- {
- head_code = gimple_code (first);
- head_name = gimple_code_name[head_code];
- head_line = get_lineno (first);
- }
- else
- head_name = "no-statement";
-
- if (last)
- {
- end_code = gimple_code (last);
- end_name = gimple_code_name[end_code];
- end_line = get_lineno (last);
- }
- else
- end_name = "no-statement";
-
- fprintf (file, "node: { title: \"%d\" label: \"#%d\\n%s (%d)\\n%s (%d)\"}\n",
- bb->index, bb->index, head_name, head_line, end_name,
- end_line);
-
- FOR_EACH_EDGE (e, ei, bb->succs)
- {
- if (e->dest == EXIT_BLOCK_PTR)
- fprintf (file, "edge: { sourcename: \"%d\" targetname: \"EXIT\"", bb->index);
- else
- fprintf (file, "edge: { sourcename: \"%d\" targetname: \"%d\"", bb->index, e->dest->index);
-
- if (e->flags & EDGE_FAKE)
- fprintf (file, " priority: 10 linestyle: dotted");
- else
- fprintf (file, " priority: 100 linestyle: solid");
-
- fprintf (file, " }\n");
- }
-
- if (bb->next_bb != EXIT_BLOCK_PTR)
- fputc ('\n', file);
- }
-
- fputs ("}\n\n", file);
-}
-
-
-
/*---------------------------------------------------------------------------
Miscellaneous helpers
---------------------------------------------------------------------------*/
{
/* If the function has no non-local labels, then a call cannot make an
abnormal transfer of control. */
- if (!cfun->has_nonlocal_label)
+ if (!cfun->has_nonlocal_label
+ && !cfun->calls_setjmp)
return false;
/* Likewise if the call has no side effects. */
else
return true;
}
+ else if (gimple_code (stmt) == GIMPLE_CALL
+ && gimple_call_flags (stmt) & ECF_RETURNS_TWICE)
+ /* setjmp acts similar to a nonlocal GOTO target and thus should
+ start a new block. */
+ return true;
return false;
}
void
delete_tree_cfg_annotations (void)
{
- label_to_block_map = NULL;
+ vec_free (label_to_block_map);
}
static void
reinstall_phi_args (edge new_edge, edge old_edge)
{
- edge_var_map_vector v;
+ edge_var_map_vector *v;
edge_var_map *vm;
int i;
gimple_stmt_iterator phis;
return;
for (i = 0, phis = gsi_start_phis (new_edge->dest);
- VEC_iterate (edge_var_map, v, i, vm) && !gsi_end_p (phis);
+ v->iterate (i, &vm) && !gsi_end_p (phis);
i++, gsi_next (&phis))
{
gimple phi = gsi_stmt (phis);
case REALPART_EXPR:
case IMAGPART_EXPR:
+ case BIT_FIELD_REF:
+ if (!is_gimple_reg_type (TREE_TYPE (t)))
+ {
+ error ("non-scalar BIT_FIELD_REF, IMAGPART_EXPR or REALPART_EXPR");
+ return t;
+ }
+
+ if (TREE_CODE (t) == BIT_FIELD_REF)
+ {
+ tree t0 = TREE_OPERAND (t, 0);
+ tree t1 = TREE_OPERAND (t, 1);
+ tree t2 = TREE_OPERAND (t, 2);
+ if (!tree_fits_uhwi_p (t1)
+ || !tree_fits_uhwi_p (t2))
+ {
+ error ("invalid position or size operand to BIT_FIELD_REF");
+ return t;
+ }
+ if (INTEGRAL_TYPE_P (TREE_TYPE (t))
+ && (TYPE_PRECISION (TREE_TYPE (t))
+ != tree_to_uhwi (t1)))
+ {
+ error ("integral result type precision does not match "
+ "field size of BIT_FIELD_REF");
+ return t;
+ }
+ else if (!INTEGRAL_TYPE_P (TREE_TYPE (t))
+ && TYPE_MODE (TREE_TYPE (t)) != BLKmode
+ && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (t)))
+ != tree_to_uhwi (t1)))
+ {
+ error ("mode precision of non-integral result does not "
+ "match field size of BIT_FIELD_REF");
+ return t;
+ }
+ if (!AGGREGATE_TYPE_P (TREE_TYPE (t0))
+ && (tree_to_uhwi (t1) + tree_to_uhwi (t2)
+ > tree_to_uhwi (TYPE_SIZE (TREE_TYPE (t0)))))
+ {
+ error ("position plus size exceeds size of referenced object in "
+ "BIT_FIELD_REF");
+ return t;
+ }
+ }
+ t = TREE_OPERAND (t, 0);
+
+ /* Fall-through. */
case COMPONENT_REF:
case ARRAY_REF:
case ARRAY_RANGE_REF:
- case BIT_FIELD_REF:
case VIEW_CONVERT_EXPR:
/* We have a nest of references. Verify that each of the operands
that determine where to reference is either a constant or a variable,
if (TREE_OPERAND (t, 3))
CHECK_OP (3, "invalid array stride");
}
- else if (TREE_CODE (t) == BIT_FIELD_REF)
+ else if (TREE_CODE (t) == BIT_FIELD_REF
+ || TREE_CODE (t) == REALPART_EXPR
+ || TREE_CODE (t) == IMAGPART_EXPR)
{
- if (!host_integerp (TREE_OPERAND (t, 1), 1)
- || !host_integerp (TREE_OPERAND (t, 2), 1))
- {
- error ("invalid position or size operand to BIT_FIELD_REF");
- return t;
- }
- if (INTEGRAL_TYPE_P (TREE_TYPE (t))
- && (TYPE_PRECISION (TREE_TYPE (t))
- != TREE_INT_CST_LOW (TREE_OPERAND (t, 1))))
- {
- error ("integral result type precision does not match "
- "field size of BIT_FIELD_REF");
- return t;
- }
- else if (!INTEGRAL_TYPE_P (TREE_TYPE (t))
- && !AGGREGATE_TYPE_P (TREE_TYPE (t))
- && TYPE_MODE (TREE_TYPE (t)) != BLKmode
- && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (t)))
- != TREE_INT_CST_LOW (TREE_OPERAND (t, 1))))
- {
- error ("mode precision of non-integral result does not "
- "match field size of BIT_FIELD_REF");
- return t;
- }
+ error ("non-top-level BIT_FIELD_REF, IMAGPART_EXPR or "
+ "REALPART_EXPR");
+ return t;
}
t = TREE_OPERAND (t, 0);
if (INTEGRAL_TYPE_P (type)
&& (TREE_CODE (type) == BOOLEAN_TYPE
|| TYPE_PRECISION (type) == 1))
- ;
+ {
+ if (TREE_CODE (op0_type) == VECTOR_TYPE
+ || TREE_CODE (op1_type) == VECTOR_TYPE)
+ {
+ error ("vector comparison returning a boolean");
+ debug_generic_expr (op0_type);
+ debug_generic_expr (op1_type);
+ return true;
+ }
+ }
/* Or an integer vector type with the same size and element count
as the comparison operand types. */
else if (TREE_CODE (type) == VECTOR_TYPE
if (TYPE_VECTOR_SUBPARTS (type) != TYPE_VECTOR_SUBPARTS (op0_type)
|| (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (type)))
- != GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op0_type)))))
+ != GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op0_type))))
+ /* The result of a vector comparison is of signed
+ integral type. */
+ || TYPE_UNSIGNED (TREE_TYPE (type)))
{
error ("invalid vector comparison resulting type");
debug_generic_expr (type);
{
if ((!INTEGRAL_TYPE_P (rhs1_type) || !SCALAR_FLOAT_TYPE_P (lhs_type))
&& (!VECTOR_INTEGER_TYPE_P (rhs1_type)
- || !VECTOR_FLOAT_TYPE_P(lhs_type)))
+ || !VECTOR_FLOAT_TYPE_P (lhs_type)))
{
error ("invalid types in conversion to floating point");
debug_generic_expr (lhs_type);
{
if ((!INTEGRAL_TYPE_P (lhs_type) || !SCALAR_FLOAT_TYPE_P (rhs1_type))
&& (!VECTOR_INTEGER_TYPE_P (lhs_type)
- || !VECTOR_FLOAT_TYPE_P(rhs1_type)))
+ || !VECTOR_FLOAT_TYPE_P (rhs1_type)))
{
error ("invalid types in conversion to integer");
debug_generic_expr (lhs_type);
case PLUS_EXPR:
case MINUS_EXPR:
{
- /* We use regular PLUS_EXPR and MINUS_EXPR for vectors.
- ??? This just makes the checker happy and may not be what is
- intended. */
- if (TREE_CODE (lhs_type) == VECTOR_TYPE
- && POINTER_TYPE_P (TREE_TYPE (lhs_type)))
+ tree lhs_etype = lhs_type;
+ tree rhs1_etype = rhs1_type;
+ tree rhs2_etype = rhs2_type;
+ if (TREE_CODE (lhs_type) == VECTOR_TYPE)
{
if (TREE_CODE (rhs1_type) != VECTOR_TYPE
|| TREE_CODE (rhs2_type) != VECTOR_TYPE)
error ("invalid non-vector operands to vector valued plus");
return true;
}
- lhs_type = TREE_TYPE (lhs_type);
- rhs1_type = TREE_TYPE (rhs1_type);
- rhs2_type = TREE_TYPE (rhs2_type);
- /* PLUS_EXPR is commutative, so we might end up canonicalizing
- the pointer to 2nd place. */
- if (POINTER_TYPE_P (rhs2_type))
- {
- tree tem = rhs1_type;
- rhs1_type = rhs2_type;
- rhs2_type = tem;
- }
- goto do_pointer_plus_expr_check;
+ lhs_etype = TREE_TYPE (lhs_type);
+ rhs1_etype = TREE_TYPE (rhs1_type);
+ rhs2_etype = TREE_TYPE (rhs2_type);
}
- if (POINTER_TYPE_P (lhs_type)
- || POINTER_TYPE_P (rhs1_type)
- || POINTER_TYPE_P (rhs2_type))
+ if (POINTER_TYPE_P (lhs_etype)
+ || POINTER_TYPE_P (rhs1_etype)
+ || POINTER_TYPE_P (rhs2_etype))
{
error ("invalid (pointer) operands to plus/minus");
return true;
case POINTER_PLUS_EXPR:
{
-do_pointer_plus_expr_check:
if (!POINTER_TYPE_P (rhs1_type)
|| !useless_type_conversion_p (lhs_type, rhs1_type)
|| !ptrofftype_p (rhs2_type))
return true;
}
+ if (gimple_clobber_p (stmt)
+ && !(DECL_P (lhs) || TREE_CODE (lhs) == MEM_REF))
+ {
+ error ("non-decl/MEM_REF LHS in clobber statement");
+ debug_generic_expr (lhs);
+ return true;
+ }
+
if (handled_component_p (lhs))
res |= verify_types_in_gimple_reference (lhs, true);
return res;
case CONSTRUCTOR:
- case OBJ_TYPE_REF:
- case ASSERT_EXPR:
- case WITH_SIZE_EXPR:
- /* FIXME. */
- return res;
-
+ if (TREE_CODE (rhs1_type) == VECTOR_TYPE)
+ {
+ unsigned int i;
+ tree elt_i, elt_v, elt_t = NULL_TREE;
+
+ if (CONSTRUCTOR_NELTS (rhs1) == 0)
+ return res;
+ /* For vector CONSTRUCTORs we require that either it is empty
+ CONSTRUCTOR, or it is a CONSTRUCTOR of smaller vector elements
+ (then the element count must be correct to cover the whole
+ outer vector and index must be NULL on all elements, or it is
+ a CONSTRUCTOR of scalar elements, where we as an exception allow
+ smaller number of elements (assuming zero filling) and
+ consecutive indexes as compared to NULL indexes (such
+ CONSTRUCTORs can appear in the IL from FEs). */
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (rhs1), i, elt_i, elt_v)
+ {
+ if (elt_t == NULL_TREE)
+ {
+ elt_t = TREE_TYPE (elt_v);
+ if (TREE_CODE (elt_t) == VECTOR_TYPE)
+ {
+ tree elt_t = TREE_TYPE (elt_v);
+ if (!useless_type_conversion_p (TREE_TYPE (rhs1_type),
+ TREE_TYPE (elt_t)))
+ {
+ error ("incorrect type of vector CONSTRUCTOR"
+ " elements");
+ debug_generic_stmt (rhs1);
+ return true;
+ }
+ else if (CONSTRUCTOR_NELTS (rhs1)
+ * TYPE_VECTOR_SUBPARTS (elt_t)
+ != TYPE_VECTOR_SUBPARTS (rhs1_type))
+ {
+ error ("incorrect number of vector CONSTRUCTOR"
+ " elements");
+ debug_generic_stmt (rhs1);
+ return true;
+ }
+ }
+ else if (!useless_type_conversion_p (TREE_TYPE (rhs1_type),
+ elt_t))
+ {
+ error ("incorrect type of vector CONSTRUCTOR elements");
+ debug_generic_stmt (rhs1);
+ return true;
+ }
+ else if (CONSTRUCTOR_NELTS (rhs1)
+ > TYPE_VECTOR_SUBPARTS (rhs1_type))
+ {
+ error ("incorrect number of vector CONSTRUCTOR elements");
+ debug_generic_stmt (rhs1);
+ return true;
+ }
+ }
+ else if (!useless_type_conversion_p (elt_t, TREE_TYPE (elt_v)))
+ {
+ error ("incorrect type of vector CONSTRUCTOR elements");
+ debug_generic_stmt (rhs1);
+ return true;
+ }
+ if (elt_i != NULL_TREE
+ && (TREE_CODE (elt_t) == VECTOR_TYPE
+ || TREE_CODE (elt_i) != INTEGER_CST
+ || compare_tree_int (elt_i, i) != 0))
+ {
+ error ("vector CONSTRUCTOR with non-NULL element index");
+ debug_generic_stmt (rhs1);
+ return true;
+ }
+ }
+ }
+ return res;
+ case OBJ_TYPE_REF:
+ case ASSERT_EXPR:
+ case WITH_SIZE_EXPR:
+ /* FIXME. */
+ return res;
+
default:;
}
if ((TREE_CODE (op) == RESULT_DECL
&& DECL_BY_REFERENCE (op))
|| (TREE_CODE (op) == SSA_NAME
+ && SSA_NAME_VAR (op)
&& TREE_CODE (SSA_NAME_VAR (op)) == RESULT_DECL
&& DECL_BY_REFERENCE (SSA_NAME_VAR (op))))
op = TREE_TYPE (op);
return true;
}
- elt = gimple_switch_default_label (stmt);
+ elt = gimple_switch_label (stmt, 0);
if (CASE_LOW (elt) != NULL_TREE || CASE_HIGH (elt) != NULL_TREE)
{
error ("invalid default case label in switch statement");
if (TREE_CODE (decl) != LABEL_DECL)
return true;
+ if (!DECL_NONLOCAL (decl) && !FORCED_LABEL (decl)
+ && DECL_CONTEXT (decl) != current_function_decl)
+ {
+ error ("label's context is not the current function decl");
+ err |= true;
+ }
uid = LABEL_DECL_UID (decl);
if (cfun->cfg
- && (uid == -1
- || VEC_index (basic_block,
- label_to_block_map, uid) != gimple_bb (stmt)))
+ && (uid == -1 || (*label_to_block_map)[uid] != gimple_bb (stmt)))
{
error ("incorrect entry in label_to_block_map");
err |= true;
return true;
}
- virtual_p = !is_gimple_reg (phi_result);
+ virtual_p = virtual_operand_p (phi_result);
if (TREE_CODE (phi_result) != SSA_NAME
|| (virtual_p
&& SSA_NAME_VAR (phi_result) != gimple_vop (cfun)))
/* Addressable variables do have SSA_NAMEs but they
are not considered gimple values. */
else if ((TREE_CODE (t) == SSA_NAME
- && virtual_p != !is_gimple_reg (t))
+ && virtual_p != virtual_operand_p (t))
|| (virtual_p
&& (TREE_CODE (t) != SSA_NAME
|| SSA_NAME_VAR (t) != gimple_vop (cfun)))
/* Return true when the T can be shared. */
-bool
+static bool
tree_node_can_be_shared (tree t)
{
if (IS_TYPE_OR_DECL_P (t)
if (TREE_CODE (t) == CASE_LABEL_EXPR)
return true;
- while (((TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
- && is_gimple_min_invariant (TREE_OPERAND (t, 1)))
- || TREE_CODE (t) == COMPONENT_REF
- || TREE_CODE (t) == REALPART_EXPR
- || TREE_CODE (t) == IMAGPART_EXPR)
- t = TREE_OPERAND (t, 0);
-
if (DECL_P (t))
return true;
return false;
}
-/* Called via walk_gimple_stmt. Verify tree sharing. */
+/* Called via walk_tree. Verify tree sharing. */
static tree
-verify_node_sharing (tree *tp, int *walk_subtrees, void *data)
+verify_node_sharing_1 (tree *tp, int *walk_subtrees, void *data)
{
- struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
- struct pointer_set_t *visited = (struct pointer_set_t *) wi->info;
+ struct pointer_set_t *visited = (struct pointer_set_t *) data;
if (tree_node_can_be_shared (*tp))
{
return NULL;
}
+/* Called via walk_gimple_stmt. Verify tree sharing. DATA is a
+ walk_stmt_info whose INFO member holds the pointer-set of nodes
+ already visited; the real work is done by verify_node_sharing_1,
+ which this merely adapts to the walk_gimple_stmt callback shape. */
+
+static tree
+verify_node_sharing (tree *tp, int *walk_subtrees, void *data)
+{
+ struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
+ return verify_node_sharing_1 (tp, walk_subtrees, wi->info);
+}
+
static bool eh_error_found;
static int
verify_eh_throw_stmt_node (void **slot, void *data)
return 1;
}
+/* Verify that the block of location LOC, if any, is a member of the
+ pointer-set BLOCKS (the blocks reachable from the function's BLOCK
+ tree). Recurses through BLOCK_SOURCE_LOCATION of each enclosing
+ block. Returns true (after emitting an error) on failure, false if
+ everything checks out. */
+
+static bool
+verify_location (pointer_set_t *blocks, location_t loc)
+{
+ tree block = LOCATION_BLOCK (loc);
+ if (block != NULL_TREE
+ && !pointer_set_contains (blocks, block))
+ {
+ error ("location references block not in block tree");
+ return true;
+ }
+ if (block != NULL_TREE)
+ return verify_location (blocks, BLOCK_SOURCE_LOCATION (block));
+ return false;
+}
+
+/* Called via walk_tree. Verify that expressions have no blocks
+ attached to their locations. Returns the offending expression (as
+ walk_tree's non-NULL stop value) if one is found, NULL otherwise.
+ Non-expression nodes are not descended into. */
+
+static tree
+verify_expr_no_block (tree *tp, int *walk_subtrees, void *)
+{
+ if (!EXPR_P (*tp))
+ {
+ *walk_subtrees = false;
+ return NULL;
+ }
+
+ location_t loc = EXPR_LOCATION (*tp);
+ if (LOCATION_BLOCK (loc) != NULL)
+ return *tp;
+
+ return NULL;
+}
+
+/* Called via walk_tree. Verify locations of expressions. DATA is the
+ pointer-set of BLOCKs belonging to the function's block tree. Returns
+ the offending tree (stopping the walk) on failure, NULL otherwise. */
+
+static tree
+verify_expr_location_1 (tree *tp, int *walk_subtrees, void *data)
+{
+ struct pointer_set_t *blocks = (struct pointer_set_t *) data;
+
+ /* DEBUG_EXPRs hanging off a VAR_DECL are not part of the IL proper,
+ so their locations must not reference any block at all. */
+ if (TREE_CODE (*tp) == VAR_DECL
+ && DECL_HAS_DEBUG_EXPR_P (*tp))
+ {
+ tree t = DECL_DEBUG_EXPR (*tp);
+ tree addr = walk_tree (&t, verify_expr_no_block, NULL, NULL);
+ if (addr)
+ return addr;
+ }
+ /* Likewise for DECL_VALUE_EXPRs of variables, parameters and results. */
+ if ((TREE_CODE (*tp) == VAR_DECL
+ || TREE_CODE (*tp) == PARM_DECL
+ || TREE_CODE (*tp) == RESULT_DECL)
+ && DECL_HAS_VALUE_EXPR_P (*tp))
+ {
+ tree t = DECL_VALUE_EXPR (*tp);
+ tree addr = walk_tree (&t, verify_expr_no_block, NULL, NULL);
+ if (addr)
+ return addr;
+ }
+
+ /* Only expressions carry locations; do not descend into other nodes. */
+ if (!EXPR_P (*tp))
+ {
+ *walk_subtrees = false;
+ return NULL;
+ }
+
+ location_t loc = EXPR_LOCATION (*tp);
+ if (verify_location (blocks, loc))
+ return *tp;
+
+ return NULL;
+}
+
+/* Called via walk_gimple_op. Verify locations of expressions. Thin
+ adapter: unwraps the walk_stmt_info in DATA and forwards its INFO
+ (the block pointer-set) to verify_expr_location_1. */
+
+static tree
+verify_expr_location (tree *tp, int *walk_subtrees, void *data)
+{
+ struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
+ return verify_expr_location_1 (tp, walk_subtrees, wi->info);
+}
+
+/* Insert all subblocks of BLOCK into the pointer-set BLOCKS and recurse,
+ so that BLOCKS ends up containing every block in the subtree rooted at
+ BLOCK (BLOCK itself is expected to have been inserted by the caller). */
+
+static void
+collect_subblocks (pointer_set_t *blocks, tree block)
+{
+ tree t;
+ for (t = BLOCK_SUBBLOCKS (block); t; t = BLOCK_CHAIN (t))
+ {
+ pointer_set_insert (blocks, t);
+ collect_subblocks (blocks, t);
+ }
+}
+
/* Verify the GIMPLE statements in the CFG of FN. */
DEBUG_FUNCTION void
{
basic_block bb;
bool err = false;
- struct pointer_set_t *visited, *visited_stmts;
+ struct pointer_set_t *visited, *visited_stmts, *blocks;
timevar_push (TV_TREE_STMT_VERIFY);
visited = pointer_set_create ();
visited_stmts = pointer_set_create ();
+ /* Collect all BLOCKs referenced by the BLOCK tree of FN. */
+ blocks = pointer_set_create ();
+ if (DECL_INITIAL (fn->decl))
+ {
+ pointer_set_insert (blocks, DECL_INITIAL (fn->decl));
+ collect_subblocks (blocks, DECL_INITIAL (fn->decl));
+ }
+
FOR_EACH_BB_FN (bb, fn)
{
gimple_stmt_iterator gsi;
err2 |= verify_gimple_phi (phi);
+ /* Only PHI arguments have locations. */
+ if (gimple_location (phi) != UNKNOWN_LOCATION)
+ {
+ error ("PHI node with location");
+ err2 = true;
+ }
+
for (i = 0; i < gimple_phi_num_args (phi); i++)
{
tree arg = gimple_phi_arg_def (phi, i);
- tree addr = walk_tree (&arg, verify_node_sharing, visited, NULL);
+ tree addr = walk_tree (&arg, verify_node_sharing_1,
+ visited, NULL);
if (addr)
{
error ("incorrect sharing of tree nodes");
debug_generic_expr (addr);
err2 |= true;
}
+ location_t loc = gimple_phi_arg_location (phi, i);
+ if (virtual_operand_p (gimple_phi_result (phi))
+ && loc != UNKNOWN_LOCATION)
+ {
+ error ("virtual PHI with argument locations");
+ err2 = true;
+ }
+ addr = walk_tree (&arg, verify_expr_location_1, blocks, NULL);
+ if (addr)
+ {
+ debug_generic_expr (addr);
+ err2 = true;
+ }
+ err2 |= verify_location (blocks, loc);
}
if (err2)
}
err2 |= verify_gimple_stmt (stmt);
+ err2 |= verify_location (blocks, gimple_location (stmt));
memset (&wi, 0, sizeof (wi));
wi.info = (void *) visited;
err2 |= true;
}
+ memset (&wi, 0, sizeof (wi));
+ wi.info = (void *) blocks;
+ addr = walk_gimple_op (stmt, verify_expr_location, &wi);
+ if (addr)
+ {
+ debug_generic_expr (addr);
+ err2 |= true;
+ }
+
/* ??? Instead of not checking these stmts at all the walker
should know its context via wi. */
if (!is_gimple_debug (stmt)
pointer_set_destroy (visited);
pointer_set_destroy (visited_stmts);
+ pointer_set_destroy (blocks);
verify_histograms ();
timevar_pop (TV_TREE_STMT_VERIFY);
}
edge e;
edge_iterator ei;
- if (ENTRY_BLOCK_PTR->il.gimple.seq || ENTRY_BLOCK_PTR->il.gimple.phi_nodes)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->il.gimple.seq
+ || ENTRY_BLOCK_PTR_FOR_FN (cfun)->il.gimple.phi_nodes)
{
error ("ENTRY_BLOCK has IL associated with it");
err = 1;
}
- if (EXIT_BLOCK_PTR->il.gimple.seq || EXIT_BLOCK_PTR->il.gimple.phi_nodes)
+ if (EXIT_BLOCK_PTR_FOR_FN (cfun)->il.gimple.seq
+ || EXIT_BLOCK_PTR_FOR_FN (cfun)->il.gimple.phi_nodes)
{
error ("EXIT_BLOCK has IL associated with it");
err = 1;
}
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (e->flags & EDGE_FALLTHRU)
{
error ("fallthru to exit from bb %d", e->src->index);
error ("wrong outgoing edge flags at end of bb %d", bb->index);
err = 1;
}
- if (single_succ (bb) != EXIT_BLOCK_PTR)
+ if (single_succ (bb) != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
error ("return edge does not point to exit in bb %d",
bb->index);
phi = gsi_stmt (gsi);
var = gimple_phi_result (phi);
new_phi = create_phi_node (var, bb);
- SSA_NAME_DEF_STMT (var) = new_phi;
- gimple_phi_set_result (phi, make_ssa_name (SSA_NAME_VAR (var), phi));
+ gimple_phi_set_result (phi, copy_ssa_name (var, phi));
add_phi_arg (new_phi, gimple_phi_result (phi), fallthru,
UNKNOWN_LOCATION);
}
if (e->flags & EDGE_EH)
return redirect_eh_edge (e, dest);
- if (e->src != ENTRY_BLOCK_PTR)
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
ret = gimple_try_redirect_by_replacing_jump (e, dest);
if (ret)
}
+/* Return TRUE if block BB has no executable statements, otherwise return
+ FALSE. A block with PHI nodes is never considered empty; labels and
+ debug statements do not count as executable. */
+
+static bool
+gimple_empty_block_p (basic_block bb)
+{
+ /* BB must have no executable statements. */
+ gimple_stmt_iterator gsi = gsi_after_labels (bb);
+ if (phi_nodes (bb))
+ return false;
+ if (gsi_end_p (gsi))
+ return true;
+ /* Skip over any run of debug statements following the labels. */
+ if (is_gimple_debug (gsi_stmt (gsi)))
+ gsi_next_nondebug (&gsi);
+ return gsi_end_p (gsi);
+}
+
+
+/* Split a basic block if it ends with a conditional branch and if the
+ other part of the block is not empty. Returns the new block holding
+ the branch, or NULL when BB does not end in a GIMPLE_COND or
+ GIMPLE_SWITCH. NOTE(review): when the branch is the only non-debug
+ statement, gsi_prev_nondebug leaves the iterator at the sequence
+ head — presumably split_block handles that split point; confirm
+ against split_block's contract. */
+
+static basic_block
+gimple_split_block_before_cond_jump (basic_block bb)
+{
+ gimple last, split_point;
+ gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
+ if (gsi_end_p (gsi))
+ return NULL;
+ last = gsi_stmt (gsi);
+ if (gimple_code (last) != GIMPLE_COND
+ && gimple_code (last) != GIMPLE_SWITCH)
+ return NULL;
+ /* Split just before the branch so it ends up alone in the new block. */
+ gsi_prev_nondebug (&gsi);
+ split_point = gsi_stmt (gsi);
+ return split_block (bb, split_point)->dest;
+}
+
+
/* Return true if basic_block can be duplicated. */
static bool
gimple_seq phis = phi_nodes (bb);
gimple phi, stmt, copy;
- new_bb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
+ new_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
/* Copy the PHI nodes. We ignore PHI node arguments here because
the incoming edges have not been setup yet. */
for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
{
phi = gsi_stmt (gsi);
- copy = create_phi_node (gimple_phi_result (phi), new_bb);
- create_new_def_for (gimple_phi_result (copy), copy,
+ copy = create_phi_node (NULL_TREE, new_bb);
+ create_new_def_for (gimple_phi_result (phi), copy,
gimple_phi_result_ptr (copy));
+ gimple_set_uid (copy, gimple_uid (phi));
}
gsi_tgt = gsi_start_bb (new_bb);
important exit edge EXIT. By important we mean that no SSA name defined
inside region is live over the other exit edges of the region. All entry
edges to the region must go to ENTRY->dest. The edge ENTRY is redirected
- to the duplicate of the region. SSA form, dominance and loop information
- is updated. The new basic blocks are stored to REGION_COPY in the same
- order as they had in REGION, provided that REGION_COPY is not NULL.
+ to the duplicate of the region. Dominance and loop information is
+ updated if UPDATE_DOMINANCE is true, but not the SSA web. If
+ UPDATE_DOMINANCE is false then we assume that the caller will update the
+ dominance information after calling this function. The new basic
+ blocks are stored to REGION_COPY in the same order as they had in REGION,
+ provided that REGION_COPY is not NULL.
The function returns false if it is unable to copy the region,
true otherwise. */
bool
gimple_duplicate_sese_region (edge entry, edge exit,
basic_block *region, unsigned n_region,
- basic_block *region_copy)
+ basic_block *region_copy,
+ bool update_dominance)
{
unsigned i;
bool free_region_copy = false, copying_header = false;
struct loop *loop = entry->dest->loop_father;
edge exit_copy;
- VEC (basic_block, heap) *doms;
+ vec<basic_block> doms;
edge redirected;
int total_freq = 0, entry_freq = 0;
gcov_type total_count = 0, entry_count = 0;
free_region_copy = true;
}
- gcc_assert (!need_ssa_update_p (cfun));
+ initialize_original_copy_tables ();
/* Record blocks outside the region that are dominated by something
inside. */
- doms = NULL;
- initialize_original_copy_tables ();
-
- doms = get_dominated_by_region (CDI_DOMINATORS, region, n_region);
+ if (update_dominance)
+ {
+ doms.create (0);
+ doms = get_dominated_by_region (CDI_DOMINATORS, region, n_region);
+ }
if (entry->dest->count)
{
}
copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
- split_edge_bb_loc (entry));
+ split_edge_bb_loc (entry), update_dominance);
if (total_count)
{
scale_bbs_frequencies_gcov_type (region, n_region,
for entry block and its copy. Anything that is outside of the
region, but was dominated by something inside needs recounting as
well. */
- set_immediate_dominator (CDI_DOMINATORS, entry->dest, entry->src);
- VEC_safe_push (basic_block, heap, doms, get_bb_original (entry->dest));
- iterate_fix_dominators (CDI_DOMINATORS, doms, false);
- VEC_free (basic_block, heap, doms);
+ if (update_dominance)
+ {
+ set_immediate_dominator (CDI_DOMINATORS, entry->dest, entry->src);
+ doms.safe_push (get_bb_original (entry->dest));
+ iterate_fix_dominators (CDI_DOMINATORS, doms, false);
+ doms.release ();
+ }
/* Add the other PHI node arguments. */
add_phi_args_after_copy (region_copy, n_region, NULL);
- /* Update the SSA web. */
- update_ssa (TODO_update_ssa);
-
if (free_region_copy)
free (region_copy);
struct loop *loop = exit->dest->loop_father;
struct loop *orig_loop = entry->dest->loop_father;
basic_block switch_bb, entry_bb, nentry_bb;
- VEC (basic_block, heap) *doms;
+ vec<basic_block> doms;
int total_freq = 0, exit_freq = 0;
gcov_type total_count = 0, exit_count = 0;
edge exits[2], nexits[2], e;
}
copy_bbs (region, n_region, region_copy, exits, 2, nexits, orig_loop,
- split_edge_bb_loc (exit));
+ split_edge_bb_loc (exit), true);
if (total_count)
{
scale_bbs_frequencies_gcov_type (region, n_region,
/* Anything that is outside of the region, but was dominated by something
inside needs to update dominance info. */
iterate_fix_dominators (CDI_DOMINATORS, doms, false);
- VEC_free (basic_block, heap, doms);
+ doms.release ();
/* Update the SSA web. */
update_ssa (TODO_update_ssa);
void
gather_blocks_in_sese_region (basic_block entry, basic_block exit,
- VEC(basic_block,heap) **bbs_p)
+ vec<basic_block> *bbs_p)
{
basic_block son;
son;
son = next_dom_son (CDI_DOMINATORS, son))
{
- VEC_safe_push (basic_block, heap, *bbs_p, son);
+ bbs_p->safe_push (son);
if (son != exit)
gather_blocks_in_sese_region (son, exit, bbs_p);
}
tree to_context)
{
void **loc;
- tree new_name, decl = SSA_NAME_VAR (name);
+ tree new_name;
- gcc_assert (is_gimple_reg (name));
+ gcc_assert (!virtual_operand_p (name));
loc = pointer_map_contains (vars_map, name);
if (!loc)
{
- replace_by_duplicate_decl (&decl, vars_map, to_context);
-
- new_name = make_ssa_name_fn (DECL_STRUCT_FUNCTION (to_context),
- decl, SSA_NAME_DEF_STMT (name));
- if (SSA_NAME_IS_DEFAULT_DEF (name))
- set_ssa_default_def (DECL_STRUCT_FUNCTION (to_context), decl, new_name);
+ tree decl = SSA_NAME_VAR (name);
+ if (decl)
+ {
+ replace_by_duplicate_decl (&decl, vars_map, to_context);
+ new_name = make_ssa_name_fn (DECL_STRUCT_FUNCTION (to_context),
+ decl, SSA_NAME_DEF_STMT (name));
+ if (SSA_NAME_IS_DEFAULT_DEF (name))
+ set_ssa_default_def (DECL_STRUCT_FUNCTION (to_context),
+ decl, new_name);
+ }
+ else
+ new_name = copy_ssa_name_fn (DECL_STRUCT_FUNCTION (to_context),
+ name, SSA_NAME_DEF_STMT (name));
loc = pointer_map_insert (vars_map, name);
*loc = new_name;
tree t = *tp;
if (EXPR_P (t))
- /* We should never have TREE_BLOCK set on non-statements. */
- gcc_assert (!TREE_BLOCK (t));
-
+ {
+ tree block = TREE_BLOCK (t);
+ if (block == p->orig_block
+ || (p->orig_block == NULL_TREE
+ && block != NULL_TREE))
+ TREE_SET_BLOCK (t, p->new_block);
+#ifdef ENABLE_CHECKING
+ else if (block != NULL_TREE)
+ {
+ while (block && TREE_CODE (block) == BLOCK && block != p->orig_block)
+ block = BLOCK_SUPERCONTEXT (block);
+ gcc_assert (block == p->orig_block);
+ }
+#endif
+ }
else if (DECL_P (t) || TREE_CODE (t) == SSA_NAME)
{
if (TREE_CODE (t) == SSA_NAME)
{
int old_nr, new_nr;
- old_nr = tree_low_cst (old_t_nr, 0);
+ old_nr = tree_to_shwi (old_t_nr);
new_nr = move_stmt_eh_region_nr (old_nr, p);
return build_int_cst (integer_type_node, new_nr);
gimple stmt = gsi_stmt (*gsi_p);
tree block = gimple_block (stmt);
- if (p->orig_block == NULL_TREE
- || block == p->orig_block
- || block == NULL_TREE)
+ if (block == p->orig_block
+ || (p->orig_block == NULL_TREE
+ && block != NULL_TREE))
gimple_set_block (stmt, p->new_block);
-#ifdef ENABLE_CHECKING
- else if (block != p->new_block)
- {
- while (block && block != p->orig_block)
- block = BLOCK_SUPERCONTEXT (block);
- gcc_assert (block);
- }
-#endif
switch (gimple_code (stmt))
{
/* Remove BB from dominance structures. */
delete_from_dominance_info (CDI_DOMINATORS, bb);
+
+ /* Move BB from its current loop to the copy in the new function. */
if (current_loops)
- remove_bb_from_loops (bb);
+ {
+ struct loop *new_loop = (struct loop *)bb->loop_father->aux;
+ if (new_loop)
+ bb->loop_father = new_loop;
+ }
/* Link BB to the new linked list. */
move_block_after (bb, after);
}
/* Remove BB from the original basic block array. */
- VEC_replace (basic_block, cfun->cfg->x_basic_block_info, bb->index, NULL);
+ (*cfun->cfg->x_basic_block_info)[bb->index] = NULL;
cfun->cfg->x_n_basic_blocks--;
/* Grow DEST_CFUN's basic block array if needed. */
if (bb->index >= cfg->x_last_basic_block)
cfg->x_last_basic_block = bb->index + 1;
- old_len = VEC_length (basic_block, cfg->x_basic_block_info);
+ old_len = vec_safe_length (cfg->x_basic_block_info);
if ((unsigned) cfg->x_last_basic_block >= old_len)
{
new_len = cfg->x_last_basic_block + (cfg->x_last_basic_block + 3) / 4;
- VEC_safe_grow_cleared (basic_block, gc, cfg->x_basic_block_info,
- new_len);
+ vec_safe_grow_cleared (cfg->x_basic_block_info, new_len);
}
- VEC_replace (basic_block, cfg->x_basic_block_info,
- bb->index, bb);
+ (*cfg->x_basic_block_info)[bb->index] = bb;
/* Remap the variables in phi nodes. */
for (si = gsi_start_phis (bb); !gsi_end_p (si); )
use_operand_p use;
tree op = PHI_RESULT (phi);
ssa_op_iter oi;
+ unsigned i;
- if (!is_gimple_reg (op))
+ if (virtual_operand_p (op))
{
/* Remove the phi nodes for virtual operands (alias analysis will be
run for the new function, anyway). */
SET_USE (use, replace_ssa_name (op, d->vars_map, dest_cfun->decl));
}
+ for (i = 0; i < EDGE_COUNT (bb->preds); i++)
+ {
+ location_t locus = gimple_phi_arg_location (phi, i);
+ tree block = LOCATION_BLOCK (locus);
+
+ if (locus == UNKNOWN_LOCATION)
+ continue;
+ if (d->orig_block == NULL_TREE || block == d->orig_block)
+ {
+ if (d->new_block == NULL_TREE)
+ locus = LOCATION_LOCUS (locus);
+ else
+ locus = COMBINE_LOCATION_DATA (line_table, locus, d->new_block);
+ gimple_phi_arg_set_location (phi, i, locus);
+ }
+ }
+
gsi_next (&si);
}
gcc_assert (uid > -1);
- old_len = VEC_length (basic_block, cfg->x_label_to_block_map);
+ old_len = vec_safe_length (cfg->x_label_to_block_map);
if (old_len <= (unsigned) uid)
{
new_len = 3 * uid / 2 + 1;
- VEC_safe_grow_cleared (basic_block, gc,
- cfg->x_label_to_block_map, new_len);
+ vec_safe_grow_cleared (cfg->x_label_to_block_map, new_len);
}
- VEC_replace (basic_block, cfg->x_label_to_block_map, uid, bb);
- VEC_replace (basic_block, cfun->cfg->x_label_to_block_map, uid, NULL);
+ (*cfg->x_label_to_block_map)[uid] = bb;
+ (*cfun->cfg->x_label_to_block_map)[uid] = NULL;
gcc_assert (DECL_CONTEXT (label) == dest_cfun->decl);
/* We cannot leave any operands allocated from the operand caches of
the current function. */
- free_stmt_operands (stmt);
+ free_stmt_operands (cfun, stmt);
push_cfun (dest_cfun);
update_stmt (stmt);
pop_cfun ();
}
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->goto_locus)
+ if (e->goto_locus != UNKNOWN_LOCATION)
{
- tree block = e->goto_block;
+ tree block = LOCATION_BLOCK (e->goto_locus);
if (d->orig_block == NULL_TREE
|| block == d->orig_block)
- e->goto_block = d->new_block;
-#ifdef ENABLE_CHECKING
- else if (block != d->new_block)
- {
- while (block && block != d->orig_block)
- block = BLOCK_SUPERCONTEXT (block);
- gcc_assert (block);
- }
-#endif
+ e->goto_locus = d->new_block ?
+ COMBINE_LOCATION_DATA (line_table, e->goto_locus, d->new_block) :
+ LOCATION_LOCUS (e->goto_locus);
}
}
replace_block_vars_by_duplicates (block, vars_map, to_context);
}
+/* Fixup the loop arrays and numbers after moving LOOP and its subloops
+ from FN1 to FN2: LOOP's slot in FN1's loop array is cleared, LOOP is
+ appended to FN2's array under a freshly assigned number, and the same
+ is done recursively for every subloop. */
+
+static void
+fixup_loop_arrays_after_move (struct function *fn1, struct function *fn2,
+ struct loop *loop)
+{
+ /* Discard it from the old loop array. */
+ (*get_loops (fn1))[loop->num] = NULL;
+
+ /* Place it in the new loop array, assigning it a new number. */
+ loop->num = number_of_loops (fn2);
+ vec_safe_push (loops_for_fn (fn2)->larray, loop);
+
+ /* Recurse to children. */
+ for (loop = loop->inner; loop; loop = loop->next)
+ fixup_loop_arrays_after_move (fn1, fn2, loop);
+}
+
/* Move a single-entry, single-exit region delimited by ENTRY_BB and
EXIT_BB to function DEST_CFUN. The whole region is replaced by a
single basic block in the original CFG and the new basic block is
move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
basic_block exit_bb, tree orig_block)
{
- VEC(basic_block,heap) *bbs, *dom_bbs;
+ vec<basic_block> bbs, dom_bbs;
basic_block dom_entry = get_immediate_dominator (CDI_DOMINATORS, entry_bb);
basic_block after, bb, *entry_pred, *exit_succ, abb;
struct function *saved_cfun = cfun;
int *entry_flag, *exit_flag;
unsigned *entry_prob, *exit_prob;
- unsigned i, num_entry_edges, num_exit_edges;
+ unsigned i, num_entry_edges, num_exit_edges, num_nodes;
edge e;
edge_iterator ei;
htab_t new_label_map;
struct pointer_map_t *vars_map, *eh_map;
struct loop *loop = entry_bb->loop_father;
+ struct loop *loop0 = get_loop (saved_cfun, 0);
struct move_stmt_d d;
/* If ENTRY does not strictly dominate EXIT, this cannot be an SESE
/* Collect all the blocks in the region. Manually add ENTRY_BB
because it won't be added by dfs_enumerate_from. */
- bbs = NULL;
- VEC_safe_push (basic_block, heap, bbs, entry_bb);
+ bbs.create (0);
+ bbs.safe_push (entry_bb);
gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);
/* The blocks that used to be dominated by something in BBS will now be
dominated by the new block. */
dom_bbs = get_dominated_by_region (CDI_DOMINATORS,
- VEC_address (basic_block, bbs),
- VEC_length (basic_block, bbs));
+ bbs.address (),
+ bbs.length ());
/* Detach ENTRY_BB and EXIT_BB from CFUN->CFG. We need to remember
the predecessor edges to ENTRY_BB and the successor edges to
EXIT_BB so that we can re-attach them to the new basic block that
will replace the region. */
num_entry_edges = EDGE_COUNT (entry_bb->preds);
- entry_pred = (basic_block *) xcalloc (num_entry_edges, sizeof (basic_block));
- entry_flag = (int *) xcalloc (num_entry_edges, sizeof (int));
+ entry_pred = XNEWVEC (basic_block, num_entry_edges);
+ entry_flag = XNEWVEC (int, num_entry_edges);
entry_prob = XNEWVEC (unsigned, num_entry_edges);
i = 0;
for (ei = ei_start (entry_bb->preds); (e = ei_safe_edge (ei)) != NULL;)
if (exit_bb)
{
num_exit_edges = EDGE_COUNT (exit_bb->succs);
- exit_succ = (basic_block *) xcalloc (num_exit_edges,
- sizeof (basic_block));
- exit_flag = (int *) xcalloc (num_exit_edges, sizeof (int));
+ exit_succ = XNEWVEC (basic_block, num_exit_edges);
+ exit_flag = XNEWVEC (int, num_exit_edges);
exit_prob = XNEWVEC (unsigned, num_exit_edges);
i = 0;
for (ei = ei_start (exit_bb->succs); (e = ei_safe_edge (ei)) != NULL;)
{
eh_region region = NULL;
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
region = find_outermost_region_in_block (saved_cfun, bb, region);
init_eh_for_function ();
}
}
+ /* Initialize an empty loop tree. */
+ struct loops *loops = ggc_alloc_cleared_loops ();
+ init_loops_structure (dest_cfun, loops, 1);
+ loops->state = LOOPS_MAY_HAVE_MULTIPLE_LATCHES;
+ set_loops_for_fn (dest_cfun, loops);
+
+ /* Move the outlined loop tree part. */
+ num_nodes = bbs.length ();
+ FOR_EACH_VEC_ELT (bbs, i, bb)
+ {
+ if (bb->loop_father->header == bb)
+ {
+ struct loop *this_loop = bb->loop_father;
+ struct loop *outer = loop_outer (this_loop);
+ if (outer == loop
+ /* If the SESE region contains some bbs ending with
+ a noreturn call, those are considered to belong
+ to the outermost loop in saved_cfun, rather than
+ the entry_bb's loop_father. */
+ || outer == loop0)
+ {
+ if (outer != loop)
+ num_nodes -= this_loop->num_nodes;
+ flow_loop_tree_node_remove (bb->loop_father);
+ flow_loop_tree_node_add (get_loop (dest_cfun, 0), this_loop);
+ fixup_loop_arrays_after_move (saved_cfun, cfun, this_loop);
+ }
+ }
+ else if (bb->loop_father == loop0 && loop0 != loop)
+ num_nodes--;
+
+ /* Remove loop exits from the outlined region. */
+ if (loops_for_fn (saved_cfun)->exits)
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ void **slot = htab_find_slot_with_hash
+ (loops_for_fn (saved_cfun)->exits, e,
+ htab_hash_pointer (e), NO_INSERT);
+ if (slot)
+ htab_clear_slot (loops_for_fn (saved_cfun)->exits, slot);
+ }
+ }
+
+
+ /* Adjust the number of blocks in the tree root of the outlined part. */
+ get_loop (dest_cfun, 0)->num_nodes = bbs.length () + 2;
+
+ /* Setup a mapping to be used by move_block_to_fn. */
+ loop->aux = current_loops->tree_root;
+ loop0->aux = current_loops->tree_root;
+
pop_cfun ();
/* Move blocks from BBS into DEST_CFUN. */
- gcc_assert (VEC_length (basic_block, bbs) >= 2);
+ gcc_assert (bbs.length () >= 2);
after = dest_cfun->cfg->x_entry_block_ptr;
vars_map = pointer_map_create ();
d.eh_map = eh_map;
d.remap_decls_p = true;
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
{
/* No need to update edge counts on the last block. It has
already been updated earlier when we detached the region from
after = bb;
}
+ loop->aux = NULL;
+ loop0->aux = NULL;
+ /* Loop sizes are no longer correct, fix them up. */
+ loop->num_nodes -= num_nodes;
+ for (struct loop *outer = loop_outer (loop);
+ outer; outer = loop_outer (outer))
+ outer->num_nodes -= num_nodes;
+ loop0->num_nodes -= bbs.length () - num_nodes;
+
+ if (saved_cfun->has_simduid_loops || saved_cfun->has_force_vect_loops)
+ {
+ struct loop *aloop;
+ for (i = 0; vec_safe_iterate (loops->larray, i, &aloop); i++)
+ if (aloop != NULL)
+ {
+ if (aloop->simduid)
+ {
+ replace_by_duplicate_decl (&aloop->simduid, d.vars_map,
+ d.to_context);
+ dest_cfun->has_simduid_loops = true;
+ }
+ if (aloop->force_vect)
+ dest_cfun->has_force_vect_loops = true;
+ }
+ }
+
/* Rewire BLOCK_SUBBLOCKS of orig_block. */
if (orig_block)
{
FIXME, this is silly. The CFG ought to become a parameter to
these helpers. */
push_cfun (dest_cfun);
- make_edge (ENTRY_BLOCK_PTR, entry_bb, EDGE_FALLTHRU);
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), entry_bb, EDGE_FALLTHRU);
if (exit_bb)
- make_edge (exit_bb, EXIT_BLOCK_PTR, 0);
+ make_edge (exit_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
pop_cfun ();
/* Back in the original function, the SESE region has disappeared,
}
set_immediate_dominator (CDI_DOMINATORS, bb, dom_entry);
- FOR_EACH_VEC_ELT (basic_block, dom_bbs, i, abb)
+ FOR_EACH_VEC_ELT (dom_bbs, i, abb)
set_immediate_dominator (CDI_DOMINATORS, abb, bb);
- VEC_free (basic_block, heap, dom_bbs);
+ dom_bbs.release ();
if (exit_bb)
{
free (entry_prob);
free (entry_flag);
free (entry_pred);
- VEC_free (basic_block, heap, bbs);
+ bbs.release ();
return bb;
}
-/* Dump FUNCTION_DECL FN to file FILE using FLAGS (see TDF_* in tree-pass.h)
+/* Dump FUNCTION_DECL FN to file FILE using FLAGS (see TDF_* in dumpfile.h)
*/
void
-dump_function_to_file (tree fn, FILE *file, int flags)
+dump_function_to_file (tree fndecl, FILE *file, int flags)
{
- tree arg, var;
+ tree arg, var, old_current_fndecl = current_function_decl;
struct function *dsf;
bool ignore_topmost_bind = false, any_var = false;
basic_block bb;
tree chain;
- bool tmclone = TREE_CODE (fn) == FUNCTION_DECL && decl_is_tm_clone (fn);
+ bool tmclone = (TREE_CODE (fndecl) == FUNCTION_DECL
+ && decl_is_tm_clone (fndecl));
+ struct function *fun = DECL_STRUCT_FUNCTION (fndecl);
- fprintf (file, "%s %s(", current_function_name (),
- tmclone ? "[tm-clone] " : "");
+ current_function_decl = fndecl;
+ fprintf (file, "%s %s(", function_name (fun), tmclone ? "[tm-clone] " : "");
- arg = DECL_ARGUMENTS (fn);
+ arg = DECL_ARGUMENTS (fndecl);
while (arg)
{
print_generic_expr (file, TREE_TYPE (arg), dump_flags);
fprintf (file, ")\n");
if (flags & TDF_VERBOSE)
- print_node (file, "", fn, 2);
+ print_node (file, "", fndecl, 2);
- dsf = DECL_STRUCT_FUNCTION (fn);
+ dsf = DECL_STRUCT_FUNCTION (fndecl);
if (dsf && (flags & TDF_EH))
dump_eh_tree (file, dsf);
- if (flags & TDF_RAW && !gimple_has_body_p (fn))
+ if (flags & TDF_RAW && !gimple_has_body_p (fndecl))
{
- dump_node (fn, TDF_SLIM | flags, file);
+ dump_node (fndecl, TDF_SLIM | flags, file);
+ current_function_decl = old_current_fndecl;
return;
}
- /* Switch CFUN to point to FN. */
- push_cfun (DECL_STRUCT_FUNCTION (fn));
-
/* When GIMPLE is lowered, the variables are no longer available in
BIND_EXPRs, so display them separately. */
- if (cfun && cfun->decl == fn && !VEC_empty (tree, cfun->local_decls))
+ if (fun && fun->decl == fndecl && (fun->curr_properties & PROP_gimple_lcf))
{
unsigned ix;
ignore_topmost_bind = true;
fprintf (file, "{\n");
- FOR_EACH_LOCAL_DECL (cfun, ix, var)
- {
- print_generic_decl (file, var, flags);
- if (flags & TDF_VERBOSE)
- print_node (file, "", var, 4);
- fprintf (file, "\n");
+ if (!vec_safe_is_empty (fun->local_decls))
+ FOR_EACH_LOCAL_DECL (fun, ix, var)
+ {
+ print_generic_decl (file, var, flags);
+ if (flags & TDF_VERBOSE)
+ print_node (file, "", var, 4);
+ fprintf (file, "\n");
- any_var = true;
- }
+ any_var = true;
+ }
+ if (gimple_in_ssa_p (cfun))
+ for (ix = 1; ix < num_ssa_names; ++ix)
+ {
+ tree name = ssa_name (ix);
+ if (name && !SSA_NAME_VAR (name))
+ {
+ fprintf (file, " ");
+ print_generic_expr (file, TREE_TYPE (name), flags);
+ fprintf (file, " ");
+ print_generic_expr (file, name, flags);
+ fprintf (file, ";\n");
+
+ any_var = true;
+ }
+ }
}
- if (cfun && cfun->decl == fn && cfun->cfg && basic_block_info)
+ if (fun && fun->decl == fndecl
+ && fun->cfg
+ && basic_block_info_for_fn (fun))
{
/* If the CFG has been built, emit a CFG-based dump. */
if (!ignore_topmost_bind)
fprintf (file, "{\n");
- if (any_var && n_basic_blocks)
+ if (any_var && n_basic_blocks_for_fn (fun))
fprintf (file, "\n");
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, fun)
dump_bb (file, bb, 2, flags | TDF_COMMENT);
fprintf (file, "}\n");
}
- else if (DECL_SAVED_TREE (fn) == NULL)
+ else if (DECL_SAVED_TREE (fndecl) == NULL)
{
/* The function is now in GIMPLE form but the CFG has not been
built yet. Emit the single sequence of GIMPLE statements
that make up its body. */
- gimple_seq body = gimple_body (fn);
+ gimple_seq body = gimple_body (fndecl);
if (gimple_seq_first_stmt (body)
&& gimple_seq_first_stmt (body) == gimple_seq_last_stmt (body)
int indent;
/* Make a tree based dump. */
- chain = DECL_SAVED_TREE (fn);
-
+ chain = DECL_SAVED_TREE (fndecl);
if (chain && TREE_CODE (chain) == BIND_EXPR)
{
if (ignore_topmost_bind)
dump_enumerated_decls (file, flags);
fprintf (file, "\n\n");
- /* Restore CFUN. */
- pop_cfun ();
+ current_function_decl = old_current_fndecl;
}
-
/* Dump FUNCTION_DECL FN to stderr using FLAGS (see TDF_* in tree.h) */
DEBUG_FUNCTION void
s_indent[indent] = '\0';
/* Print loop's header. */
- fprintf (file, "%sloop_%d (header = %d, latch = %d", s_indent,
- loop->num, loop->header->index, loop->latch->index);
+ fprintf (file, "%sloop_%d (", s_indent, loop->num);
+ if (loop->header)
+ fprintf (file, "header = %d", loop->header->index);
+ else
+ {
+ fprintf (file, "deleted)\n");
+ return;
+ }
+ if (loop->latch)
+ fprintf (file, ", latch = %d", loop->latch->index);
+ else
+ fprintf (file, ", multiple latches");
fprintf (file, ", niter = ");
print_generic_expr (file, loop->nb_iterations, 0);
loop, or just its structure. */
static void
-print_loop_and_siblings (FILE *file, struct loop *loop, int indent, int verbosity)
+print_loop_and_siblings (FILE *file, struct loop *loop, int indent,
+ int verbosity)
{
if (loop == NULL)
return;
{
basic_block bb;
- bb = ENTRY_BLOCK_PTR;
+ bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
if (bb && bb->loop_father)
print_loop_and_siblings (file, bb->loop_father, 0, verbosity);
}
+/* Dump a loop to stderr at verbosity 0 (structure only). */
+
+DEBUG_FUNCTION void
+debug (struct loop &ref)
+{
+ print_loop (stderr, &ref, 0, /*verbosity*/0);
+}
+
+/* Pointer overload of the above; prints "<nil>" for a NULL pointer. */
+DEBUG_FUNCTION void
+debug (struct loop *ptr)
+{
+ if (ptr)
+ debug (*ptr);
+ else
+ fprintf (stderr, "<nil>\n");
+}
+
+/* Dump a loop verbosely (verbosity level 3). */
+
+DEBUG_FUNCTION void
+debug_verbose (struct loop &ref)
+{
+ print_loop (stderr, &ref, 0, /*verbosity*/3);
+}
+
+/* Pointer overload of the above; prints "<nil>" for a NULL pointer.
+ Delegates to the reference overload so the dump really is verbose --
+ calling debug () here would silently dump at verbosity 0 instead. */
+DEBUG_FUNCTION void
+debug_verbose (struct loop *ptr)
+{
+ if (ptr)
+ debug_verbose (*ptr);
+ else
+ fprintf (stderr, "<nil>\n");
+}
+
/* Debugging loops structure at tree level, at some VERBOSITY level. */
DEBUG_FUNCTION void
debug_loop_num (unsigned num, int verbosity)
{
+ /* Loops are now stored per-function; look NUM up in cfun's tree. */
- debug_loop (get_loop (num), verbosity);
+ debug_loop (get_loop (cfun, num), verbosity);
}
/* Return true if BB ends with a call, possibly followed by some
int last_bb = last_basic_block;
bool check_last_block = false;
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return 0;
if (! blocks)
check_last_block = true;
else
- check_last_block = TEST_BIT (blocks, EXIT_BLOCK_PTR->prev_bb->index);
+ check_last_block = bitmap_bit_p (blocks,
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
/* In the last basic block, before epilogue generation, there will be
a fallthru edge to EXIT. Special care is required if the last insn
Handle this by adding a dummy instruction in a new last basic block. */
if (check_last_block)
{
- basic_block bb = EXIT_BLOCK_PTR->prev_bb;
+ basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
gimple t = NULL;
{
edge e;
- e = find_edge (bb, EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
if (e)
{
gsi_insert_on_edge (e, gimple_build_nop ());
return or not... */
for (i = 0; i < last_bb; i++)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
gimple_stmt_iterator gsi;
gimple stmt, last_stmt;
if (!bb)
continue;
- if (blocks && !TEST_BIT (blocks, i))
+ if (blocks && !bitmap_bit_p (blocks, i))
continue;
gsi = gsi_last_nondebug_bb (bb);
#ifdef ENABLE_CHECKING
if (stmt == last_stmt)
{
- e = find_edge (bb, EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (e == NULL);
}
#endif
if (e)
blocks_split++;
}
- make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}
gsi_prev (&gsi);
}
void
remove_edge_and_dominated_blocks (edge e)
{
- VEC (basic_block, heap) *bbs_to_remove = NULL;
- VEC (basic_block, heap) *bbs_to_fix_dom = NULL;
+ vec<basic_block> bbs_to_remove = vNULL;
+ vec<basic_block> bbs_to_fix_dom = vNULL;
bitmap df, df_idom;
edge f;
edge_iterator ei;
}
/* No updating is needed for edges to exit. */
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
if (cfgcleanup_altered_bbs)
bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
else
{
bbs_to_remove = get_all_dominated_blocks (CDI_DOMINATORS, e->dest);
- FOR_EACH_VEC_ELT (basic_block, bbs_to_remove, i, bb)
+ FOR_EACH_VEC_ELT (bbs_to_remove, i, bb)
{
FOR_EACH_EDGE (f, ei, bb->succs)
{
- if (f->dest != EXIT_BLOCK_PTR)
+ if (f->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_set_bit (df, f->dest->index);
}
}
- FOR_EACH_VEC_ELT (basic_block, bbs_to_remove, i, bb)
+ FOR_EACH_VEC_ELT (bbs_to_remove, i, bb)
bitmap_clear_bit (df, bb->index);
EXECUTE_IF_SET_IN_BITMAP (df, 0, i, bi)
{
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
bitmap_set_bit (df_idom,
get_immediate_dominator (CDI_DOMINATORS, bb)->index);
}
released DEFs into debug stmts. See
eliminate_unnecessary_stmts() in tree-ssa-dce.c for more
details. */
- for (i = VEC_length (basic_block, bbs_to_remove); i-- > 0; )
- delete_basic_block (VEC_index (basic_block, bbs_to_remove, i));
+ for (i = bbs_to_remove.length (); i-- > 0; )
+ delete_basic_block (bbs_to_remove[i]);
}
/* Update the dominance information. The immediate dominator may change only
the dominance frontier of E. Therefore, Y belongs to DF_IDOM. */
EXECUTE_IF_SET_IN_BITMAP (df_idom, 0, i, bi)
{
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
for (dbb = first_dom_son (CDI_DOMINATORS, bb);
dbb;
dbb = next_dom_son (CDI_DOMINATORS, dbb))
- VEC_safe_push (basic_block, heap, bbs_to_fix_dom, dbb);
+ bbs_to_fix_dom.safe_push (dbb);
}
iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
BITMAP_FREE (df);
BITMAP_FREE (df_idom);
- VEC_free (basic_block, heap, bbs_to_remove);
- VEC_free (basic_block, heap, bbs_to_fix_dom);
+ bbs_to_remove.release ();
+ bbs_to_fix_dom.release ();
}
/* Purge dead EH edges from basic block BB. */
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
/* Earlier gimple_purge_dead_eh_edges could have removed
this basic block already. */
edge_iterator ei;
gimple stmt = last_stmt (bb);
- if (!cfun->has_nonlocal_label)
+ if (!cfun->has_nonlocal_label
+ && !cfun->calls_setjmp)
return false;
if (stmt && stmt_can_make_abnormal_goto (stmt))
{
if (e->flags & EDGE_ABNORMAL)
{
- remove_edge_and_dominated_blocks (e);
+ if (e->flags & EDGE_FALLTHRU)
+ e->flags &= ~EDGE_ABNORMAL;
+ else
+ remove_edge_and_dominated_blocks (e);
changed = true;
}
else
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
/* Earlier gimple_purge_dead_abnormal_call_edges could have removed
this basic block already. */
e0->flags |= EDGE_FALSE_VALUE;
}
+
+/* Do book-keeping of basic block BB for the profile consistency checker.
+ If AFTER_PASS is 0, do pre-pass accounting, or if AFTER_PASS is 1
+ then do post-pass accounting. Store the counting in RECORD. */
+static void
+gimple_account_profile_record (basic_block bb, int after_pass,
+ struct profile_record *record)
+{
+ gimple_stmt_iterator i;
+ for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
+ {
+ /* The size estimate of each statement is always accumulated. */
+ record->size[after_pass]
+ += estimate_num_insns (gsi_stmt (i), &eni_size_weights);
+ /* Time is weighted by BB's execution count when a real profile
+ was read, or by its estimated frequency when it was guessed;
+ with no profile at all, no time is accounted. */
+ if (profile_status == PROFILE_READ)
+ record->time[after_pass]
+ += estimate_num_insns (gsi_stmt (i),
+ &eni_time_weights) * bb->count;
+ else if (profile_status == PROFILE_GUESSED)
+ record->time[after_pass]
+ += estimate_num_insns (gsi_stmt (i),
+ &eni_time_weights) * bb->frequency;
+ }
+}
+
struct cfg_hooks gimple_cfg_hooks = {
"gimple",
gimple_verify_flow_info,
gimple_dump_bb, /* dump_bb */
+ gimple_dump_bb_for_graph, /* dump_bb_for_graph */
create_bb, /* create_basic_block */
gimple_redirect_edge_and_branch, /* redirect_edge_and_branch */
gimple_redirect_edge_and_branch_force, /* redirect_edge_and_branch_force */
gimple_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
gimple_lv_adjust_loop_header_phi, /* lv_adjust_loop_header_phi*/
extract_true_false_edges_from_block, /* extract_cond_bb_edges */
- flush_pending_stmts /* flush_pending_stmts */
+ flush_pending_stmts, /* flush_pending_stmts */
+ gimple_empty_block_p, /* block_empty_p */
+ gimple_split_block_before_cond_jump, /* split_block_before_cond_jump */
+ gimple_account_profile_record,
};
gimple_find_edge_insert_loc. */
else if ((!single_pred_p (e->dest)
|| !gimple_seq_empty_p (phi_nodes (e->dest))
- || e->dest == EXIT_BLOCK_PTR)
- && e->src != ENTRY_BLOCK_PTR
+ || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& !(e->flags & EDGE_ABNORMAL))
{
gimple_stmt_iterator gsi;
return 0;
}
-struct gimple_opt_pass pass_split_crit_edges =
-{
- {
- GIMPLE_PASS,
- "crited", /* name */
- NULL, /* gate */
- split_critical_edges, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_TREE_SPLIT_EDGES, /* tv_id */
- PROP_cfg, /* properties required */
- PROP_no_crit_edges, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_verify_flow /* todo_flags_finish */
- }
+namespace {
+
+const pass_data pass_data_split_crit_edges =
+{
+ GIMPLE_PASS, /* type */
+ "crited", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ false, /* has_gate */
+ true, /* has_execute */
+ TV_TREE_SPLIT_EDGES, /* tv_id */
+ PROP_cfg, /* properties_required */
+ PROP_no_crit_edges, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_verify_flow, /* todo_flags_finish */
};
+class pass_split_crit_edges : public gimple_opt_pass
+{
+public:
+ pass_split_crit_edges (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_split_crit_edges, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ unsigned int execute () { return split_critical_edges (); }
+
+ /* clone () lets the pass manager schedule this pass more than once. */
+ opt_pass * clone () { return new pass_split_crit_edges (m_ctxt); }
+}; // class pass_split_crit_edges
+
+} // anon namespace
+
+/* Factory used by the pass manager to create the pass instance. */
+gimple_opt_pass *
+make_pass_split_crit_edges (gcc::context *ctxt)
+{
+ return new pass_split_crit_edges (ctxt);
+}
+
/* Build a ternary operation and gimplify it. Emit code before GSI.
Return the gimple_val holding the result. */
/* If we have a path to EXIT, then we do return. */
if (TREE_THIS_VOLATILE (cfun->decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0)
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0)
{
location = UNKNOWN_LOCATION;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
last = last_stmt (e->src);
if ((gimple_code (last) == GIMPLE_RETURN
without returning a value. */
else if (warn_return_type
&& !TREE_NO_WARNING (cfun->decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0
&& !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (cfun->decl))))
{
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
gimple last = last_stmt (e->src);
if (gimple_code (last) == GIMPLE_RETURN
}
}
-struct gimple_opt_pass pass_warn_function_return =
-{
- {
- GIMPLE_PASS,
- "*warn_function_return", /* name */
- NULL, /* gate */
- execute_warn_function_return, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- PROP_cfg, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
-};
+namespace {
-/* Emit noreturn warnings. */
+const pass_data pass_data_warn_function_return =
+{
+ GIMPLE_PASS, /* type */
+ "*warn_function_return", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ false, /* has_gate */
+ true, /* has_execute */
+ TV_NONE, /* tv_id */
+ PROP_cfg, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
-static unsigned int
-execute_warn_function_noreturn (void)
+class pass_warn_function_return : public gimple_opt_pass
{
- if (!TREE_THIS_VOLATILE (current_function_decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 0)
- warn_function_noreturn (current_function_decl);
- return 0;
-}
+public:
+ pass_warn_function_return (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_warn_function_return, ctxt)
+ {}
-static bool
-gate_warn_function_noreturn (void)
-{
- return warn_suggest_attribute_noreturn;
-}
-
-struct gimple_opt_pass pass_warn_function_noreturn =
-{
- {
- GIMPLE_PASS,
- "*warn_function_noreturn", /* name */
- gate_warn_function_noreturn, /* gate */
- execute_warn_function_noreturn, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- PROP_cfg, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
-};
+ /* opt_pass methods: */
+ unsigned int execute () { return execute_warn_function_return (); }
+}; // class pass_warn_function_return
+
+} // anon namespace
+
+/* Factory used by the pass manager to create the pass instance.
+ NOTE(review): the old noreturn-warning pass is deleted here --
+ presumably relocated to another file; verify in the full patch. */
+gimple_opt_pass *
+make_pass_warn_function_return (gcc::context *ctxt)
+{
+ return new pass_warn_function_return (ctxt);
+}
/* Walk a gimplified function and warn for functions whose return value is
ignored and attribute((warn_unused_result)) is set. This is done before
return flag_warn_unused_result;
}
-struct gimple_opt_pass pass_warn_unused_result =
-{
- {
- GIMPLE_PASS,
- "*warn_unused_result", /* name */
- gate_warn_unused_result, /* gate */
- run_warn_unused_result, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- PROP_gimple_any, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0, /* todo_flags_finish */
- }
+namespace {
+
+const pass_data pass_data_warn_unused_result =
+{
+ GIMPLE_PASS, /* type */
+ "*warn_unused_result", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ true, /* has_gate */
+ true, /* has_execute */
+ TV_NONE, /* tv_id */
+ PROP_gimple_any, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
};
+
+class pass_warn_unused_result : public gimple_opt_pass
+{
+public:
+ pass_warn_unused_result (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_warn_unused_result, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ bool gate () { return gate_warn_unused_result (); }
+ unsigned int execute () { return run_warn_unused_result (); }
+
+}; // class pass_warn_unused_result
+
+} // anon namespace
+
+/* Factory used by the pass manager to create the pass instance. */
+gimple_opt_pass *
+make_pass_warn_unused_result (gcc::context *ctxt)
+{
+ return new pass_warn_unused_result (ctxt);
+}
+
+/* IPA passes, compilation of earlier functions or inlining
+ might have changed some properties, such as marked functions nothrow,
+ pure, const or noreturn.
+ Remove redundant edges and basic blocks, and create new ones if necessary.
+
+ This pass can't be executed as stand alone pass from pass manager, because
+ in between inlining and this fixup the verify_flow_info would fail.
+
+ Returns a TODO_* mask telling the caller which cleanups are needed. */
+
+unsigned int
+execute_fixup_cfg (void)
+{
+ basic_block bb;
+ gimple_stmt_iterator gsi;
+ int todo = gimple_in_ssa_p (cfun) ? TODO_verify_ssa : 0;
+ gcov_type count_scale;
+ edge e;
+ edge_iterator ei;
+
+ /* The cgraph node's count may have been updated (e.g. by inlining);
+ rescale all block and edge counts to match it. */
+ count_scale
+ = GCOV_COMPUTE_SCALE (cgraph_get_node (current_function_decl)->count,
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count);
+
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count =
+ cgraph_get_node (current_function_decl)->count;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->count =
+ apply_scale (EXIT_BLOCK_PTR_FOR_FN (cfun)->count,
+ count_scale);
+
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
+ e->count = apply_scale (e->count, count_scale);
+
+ FOR_EACH_BB (bb)
+ {
+ bb->count = apply_scale (bb->count, count_scale);
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ tree decl = is_gimple_call (stmt)
+ ? gimple_call_fndecl (stmt)
+ : NULL;
+ if (decl)
+ {
+ /* Calls newly discovered to be const/pure cannot have
+ abnormal edges; purge them and update the statement. */
+ int flags = gimple_call_flags (stmt);
+ if (flags & (ECF_CONST | ECF_PURE | ECF_LOOPING_CONST_OR_PURE))
+ {
+ if (gimple_purge_dead_abnormal_call_edges (bb))
+ todo |= TODO_cleanup_cfg;
+
+ if (gimple_in_ssa_p (cfun))
+ {
+ todo |= TODO_update_ssa | TODO_cleanup_cfg;
+ update_stmt (stmt);
+ }
+ }
+
+ if (flags & ECF_NORETURN
+ && fixup_noreturn_call (stmt))
+ todo |= TODO_cleanup_cfg;
+ }
+
+ if (maybe_clean_eh_stmt (stmt)
+ && gimple_purge_dead_eh_edges (bb))
+ todo |= TODO_cleanup_cfg;
+ }
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ e->count = apply_scale (e->count, count_scale);
+
+ /* If we have a basic block with no successors that does not
+ end with a control statement or a noreturn call end it with
+ a call to __builtin_unreachable. This situation can occur
+ when inlining a noreturn call that does in fact return. */
+ if (EDGE_COUNT (bb->succs) == 0)
+ {
+ gimple stmt = last_stmt (bb);
+ if (!stmt
+ || (!is_ctrl_stmt (stmt)
+ && (!is_gimple_call (stmt)
+ || (gimple_call_flags (stmt) & ECF_NORETURN) == 0)))
+ {
+ stmt = gimple_build_call
+ (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
+ gimple_stmt_iterator gsi = gsi_last_bb (bb);
+ gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
+ }
+ }
+ }
+ /* Counts changed only if the scale is not the identity. */
+ if (count_scale != REG_BR_PROB_BASE)
+ compute_function_frequency ();
+
+ /* We just processed all calls. */
+ if (cfun->gimple_df)
+ vec_free (MODIFIED_NORETURN_CALLS (cfun));
+
+ /* Dump a textual representation of the flowgraph. */
+ if (dump_file)
+ gimple_dump_cfg (dump_file, dump_flags);
+
+ if (current_loops
+ && (todo & TODO_cleanup_cfg))
+ loops_state_set (LOOPS_NEED_FIXUP);
+
+ return todo;
+}
+
+namespace {
+
+const pass_data pass_data_fixup_cfg =
+{
+ GIMPLE_PASS, /* type */
+ "*free_cfg_annotations", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ false, /* has_gate */
+ true, /* has_execute */
+ TV_NONE, /* tv_id */
+ PROP_cfg, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+
+class pass_fixup_cfg : public gimple_opt_pass
+{
+public:
+ pass_fixup_cfg (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_fixup_cfg, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ /* clone () lets the pass manager schedule this pass more than once. */
+ opt_pass * clone () { return new pass_fixup_cfg (m_ctxt); }
+ unsigned int execute () { return execute_fixup_cfg (); }
+
+}; // class pass_fixup_cfg
+
+} // anon namespace
+
+/* Factory used by the pass manager to create the pass instance. */
+gimple_opt_pass *
+make_pass_fixup_cfg (gcc::context *ctxt)
+{
+ return new pass_fixup_cfg (ctxt);
+}
+
+/* Garbage collection support for edge_def. */
+
+extern void gt_ggc_mx (tree&);
+extern void gt_ggc_mx (gimple&);
+extern void gt_ggc_mx (rtx&);
+extern void gt_ggc_mx (basic_block&);
+
+/* Mark everything reachable from edge E for GC. */
+void
+gt_ggc_mx (edge_def *e)
+{
+ /* goto_locus encodes a location/BLOCK pair; extract and mark the
+ BLOCK so the GC does not free it while the edge is live. */
+ tree block = LOCATION_BLOCK (e->goto_locus);
+ gt_ggc_mx (e->src);
+ gt_ggc_mx (e->dest);
+ /* Only one union member of e->insns is meaningful, depending on IR. */
+ if (current_ir_type () == IR_GIMPLE)
+ gt_ggc_mx (e->insns.g);
+ else
+ gt_ggc_mx (e->insns.r);
+ gt_ggc_mx (block);
+}
+
+/* PCH support for edge_def. */
+
+extern void gt_pch_nx (tree&);
+extern void gt_pch_nx (gimple&);
+extern void gt_pch_nx (rtx&);
+extern void gt_pch_nx (basic_block&);
+
+/* Note everything reachable from edge E for PCH writing. */
+void
+gt_pch_nx (edge_def *e)
+{
+ /* goto_locus encodes a location/BLOCK pair; note the BLOCK too. */
+ tree block = LOCATION_BLOCK (e->goto_locus);
+ gt_pch_nx (e->src);
+ gt_pch_nx (e->dest);
+ /* Only one union member of e->insns is meaningful, depending on IR. */
+ if (current_ir_type () == IR_GIMPLE)
+ gt_pch_nx (e->insns.g);
+ else
+ gt_pch_nx (e->insns.r);
+ gt_pch_nx (block);
+}
+
+/* Relocate all pointers inside edge E with OP/COOKIE for PCH. */
+void
+gt_pch_nx (edge_def *e, gt_pointer_operator op, void *cookie)
+{
+ tree block = LOCATION_BLOCK (e->goto_locus);
+ op (&(e->src), cookie);
+ op (&(e->dest), cookie);
+ if (current_ir_type () == IR_GIMPLE)
+ op (&(e->insns.g), cookie);
+ else
+ op (&(e->insns.r), cookie);
+ /* NOTE(review): OP relocates the local copy BLOCK only; the updated
+ pointer is never folded back into e->goto_locus. Confirm this is
+ how location wrappers are meant to be handled during relocation. */
+ op (&(block), cookie);
+}