/* Exception handling semantics and decomposition for trees.
- Copyright (C) 2003-2013 Free Software Foundation, Inc.
+ Copyright (C) 2003-2014 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "hash-table.h"
#include "tm.h"
#include "tree.h"
+#include "expr.h"
+#include "calls.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "pointer-set.h"
-#include "tree-flow.h"
+#include "basic-block.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "tree-eh.h"
+#include "gimple-expr.h"
+#include "is-a.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "gimple-ssa.h"
+#include "cgraph.h"
+#include "tree-cfg.h"
+#include "tree-phinodes.h"
+#include "ssa-iterators.h"
+#include "stringpool.h"
+#include "tree-ssanames.h"
+#include "tree-into-ssa.h"
+#include "tree-ssa.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "langhooks.h"
-#include "ggc.h"
#include "diagnostic-core.h"
-#include "gimple.h"
#include "target.h"
#include "cfgloop.h"
+#include "gimple-low.h"
/* In some instances a tree and a gimple need to be stored in the same table,
 i.e. in hash tables. This is a structure to do this. */
typedef union {tree *tp; tree t; gimple g;} treemple;
-/* Nonzero if we are using EH to handle cleanups. */
-static int using_eh_for_cleanups_p = 0;
-
-void
-using_eh_for_cleanups (void)
-{
- using_eh_for_cleanups_p = 1;
-}
-
/* Misc functions used in this file. */
/* Remember and look up EH landing pad data for arbitrary statements.
/* Add statement T in function IFUN to landing pad NUM. */
-void
+static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
struct throw_stmt_node *n;
gcc_assert (num != 0);
- n = ggc_alloc_throw_stmt_node ();
+ n = ggc_alloc<throw_stmt_node> ();
n->stmt = t;
n->lp_nr = num;
gimple parent;
};
+/* Hashtable helpers. */
+
+struct finally_tree_hasher : typed_free_remove <finally_tree_node>
+{
+ typedef finally_tree_node value_type;
+ typedef finally_tree_node compare_type;
+ static inline hashval_t hash (const value_type *);
+ static inline bool equal (const value_type *, const compare_type *);
+};
+
+/* Hash a finally_tree_node by the pointer stored in its child union.
+ The low four bits are discarded; presumably they carry no information
+ because the pointers are aligned — NOTE(review): confirm alignment
+ assumption. */
+
+inline hashval_t
+finally_tree_hasher::hash (const value_type *v)
+{
+ return (intptr_t)v->child.t >> 4;
+}
+
+/* Two nodes compare equal iff they record the same child pointer.
+ The .t union member is used for the comparison regardless of which
+ member was originally stored. */
+
+inline bool
+finally_tree_hasher::equal (const value_type *v, const compare_type *c)
+{
+ return v->child.t == c->child.t;
+}
+
/* Note that this table is *not* marked GTY. It is short-lived. */
-static htab_t finally_tree;
+static hash_table <finally_tree_hasher> finally_tree;
static void
record_in_finally_tree (treemple child, gimple parent)
{
struct finally_tree_node *n;
- void **slot;
+ finally_tree_node **slot;
n = XNEW (struct finally_tree_node);
n->child = child;
n->parent = parent;
- slot = htab_find_slot (finally_tree, n, INSERT);
+ slot = finally_tree.find_slot (n, INSERT);
gcc_assert (!*slot);
*slot = n;
}
do
{
n.child = start;
- p = (struct finally_tree_node *) htab_find (finally_tree, &n);
+ p = finally_tree.find (&n);
if (!p)
return true;
start.g = p->parent;
x = gimple_seq_last_stmt (finally);
finally_loc = x ? gimple_location (x) : tf_loc;
- /* Lower the finally block itself. */
- lower_eh_constructs_1 (state, &finally);
-
/* Prepare for switch statement generation. */
nlabels = tf->dest_array.length ();
return_index = nlabels;
x = gimple_build_label (finally_label);
gimple_seq_add_stmt (&tf->top_p_seq, x);
+ lower_eh_constructs_1 (state, &finally);
gimple_seq_add_seq (&tf->top_p_seq, finally);
/* Redirect each incoming goto edge. */
/* Make sure that the last case is the default label, as one is required.
Then sort the labels, which is also required in GIMPLE. */
CASE_LOW (last_case) = NULL;
+ tree tem = case_label_vec.pop ();
+ gcc_assert (tem == last_case);
sort_case_labels (case_label_vec);
/* Build the switch statement, setting last_case to be the default
this_tf.try_finally_expr = tp;
this_tf.top_p = tp;
this_tf.outer = state;
- if (using_eh_for_cleanups_p && !cleanup_is_dead_in (state->cur_region))
+ if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
{
this_tf.region = gen_eh_region_cleanup (state->cur_region);
this_state.cur_region = this_tf.region;
{
gimple_seq new_eh_seq = eh_seq;
eh_seq = old_eh_seq;
- gimple_seq_add_seq(&eh_seq, new_eh_seq);
+ gimple_seq_add_seq (&eh_seq, new_eh_seq);
}
}
lower_eh_constructs_2 (state, &gsi);
}
-static unsigned int
-lower_eh_constructs (void)
+namespace {
+
+/* Pass descriptor for the GIMPLE EH lowering pass ("eh"): requires
+ lowered control flow (PROP_gimple_lcf) and provides lowered EH
+ (PROP_gimple_leh). */
+
+const pass_data pass_data_lower_eh =
+{
+ GIMPLE_PASS, /* type */
+ "eh", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ true, /* has_execute */
+ TV_TREE_EH, /* tv_id */
+ PROP_gimple_lcf, /* properties_required */
+ PROP_gimple_leh, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+
+class pass_lower_eh : public gimple_opt_pass
+{
+public:
+ pass_lower_eh (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_lower_eh, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ /* No gate override: lowering runs whenever the pass is scheduled. */
+ virtual unsigned int execute (function *);
+
+}; // class pass_lower_eh
+
+unsigned int
+pass_lower_eh::execute (function *fun)
{
struct leh_state null_state;
gimple_seq bodyp;
if (bodyp == NULL)
return 0;
- finally_tree = htab_create (31, struct_ptr_hash, struct_ptr_eq, free);
+ finally_tree.create (31);
eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
memset (&null_state, 0, sizeof (null_state));
didn't change its value, and we don't have to re-set the function. */
gcc_assert (bodyp == gimple_body (current_function_decl));
- htab_delete (finally_tree);
+ finally_tree.dispose ();
BITMAP_FREE (eh_region_may_contain_throw_map);
eh_seq = NULL;
/* If this function needs a language specific EH personality routine
and the frontend didn't already set one do so now. */
- if (function_needs_eh_personality (cfun) == eh_personality_lang
+ if (function_needs_eh_personality (fun) == eh_personality_lang
&& !DECL_FUNCTION_PERSONALITY (current_function_decl))
DECL_FUNCTION_PERSONALITY (current_function_decl)
= lang_hooks.eh_personality ();
return 0;
}
-struct gimple_opt_pass pass_lower_eh =
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_lower_eh (gcc::context *ctxt)
{
- {
- GIMPLE_PASS,
- "eh", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- NULL, /* gate */
- lower_eh_constructs, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_TREE_EH, /* tv_id */
- PROP_gimple_lcf, /* properties_required */
- PROP_gimple_leh, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
-};
+ return new pass_lower_eh (ctxt);
+}
\f
/* Create the multiple edges from an EH_DISPATCH statement to all of
the possible handlers for its EH region. Return true if there's
&handled);
}
+
+/* Returns true if it is possible to prove that the index of
+ an array access REF (an ARRAY_REF expression) falls into the
+ array bounds. */
+
+static bool
+in_array_bounds_p (tree ref)
+{
+ tree idx = TREE_OPERAND (ref, 1);
+ tree min, max;
+
+ /* A non-constant index can never be proven in range here. */
+ if (TREE_CODE (idx) != INTEGER_CST)
+ return false;
+
+ /* Both array bounds must be known integer constants to compare
+ against. */
+ min = array_ref_low_bound (ref);
+ max = array_ref_up_bound (ref);
+ if (!min
+ || !max
+ || TREE_CODE (min) != INTEGER_CST
+ || TREE_CODE (max) != INTEGER_CST)
+ return false;
+
+ /* In bounds iff min <= idx <= max. */
+ if (tree_int_cst_lt (idx, min)
+ || tree_int_cst_lt (max, idx))
+ return false;
+
+ return true;
+}
+
+/* Returns true if it is possible to prove that the range of
+ an array access REF (an ARRAY_RANGE_REF expression) falls
+ into the array bounds. */
+
+static bool
+range_in_array_bounds_p (tree ref)
+{
+ tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
+ tree range_min, range_max, min, max;
+
+ /* The accessed range's endpoints must be known integer constants. */
+ range_min = TYPE_MIN_VALUE (domain_type);
+ range_max = TYPE_MAX_VALUE (domain_type);
+ if (!range_min
+ || !range_max
+ || TREE_CODE (range_min) != INTEGER_CST
+ || TREE_CODE (range_max) != INTEGER_CST)
+ return false;
+
+ /* ... and so must the array bounds themselves. */
+ min = array_ref_low_bound (ref);
+ max = array_ref_up_bound (ref);
+ if (!min
+ || !max
+ || TREE_CODE (min) != INTEGER_CST
+ || TREE_CODE (max) != INTEGER_CST)
+ return false;
+
+ /* In bounds iff min <= range_min and range_max <= max. */
+ if (tree_int_cst_lt (range_min, min)
+ || tree_int_cst_lt (max, range_max))
+ return false;
+
+ return true;
+}
+
/* Return true if EXPR can trap, as in dereferencing an invalid pointer
location or floating point arithmetic. C.f. the rtl version, may_trap_p.
This routine expects only GIMPLE lhs or rhs input. */
restart:
switch (code)
{
- case TARGET_MEM_REF:
- if (TREE_CODE (TMR_BASE (expr)) == ADDR_EXPR
- && !TMR_INDEX (expr) && !TMR_INDEX2 (expr))
- return false;
- return !TREE_THIS_NOTRAP (expr);
-
case COMPONENT_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
return false;
return !in_array_bounds_p (expr);
+ case TARGET_MEM_REF:
case MEM_REF:
- if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
+ if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
+ && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
+ return true;
+ if (TREE_THIS_NOTRAP (expr))
return false;
- /* Fallthru. */
+ /* We cannot prove that the access is in-bounds when we have
+ variable-index TARGET_MEM_REFs. */
+ if (code == TARGET_MEM_REF
+ && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
+ return true;
+ if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
+ {
+ tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
+ offset_int off = mem_ref_offset (expr);
+ if (wi::neg_p (off, SIGNED))
+ return true;
+ if (TREE_CODE (base) == STRING_CST)
+ return wi::leu_p (TREE_STRING_LENGTH (base), off);
+ else if (DECL_SIZE_UNIT (base) == NULL_TREE
+ || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
+ || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
+ return true;
+ /* Now we are sure the first byte of the access is inside
+ the object. */
+ return false;
+ }
+ return true;
+
case INDIRECT_REF:
return !TREE_THIS_NOTRAP (expr);
/* Assume that accesses to weak functions may trap, unless we know
they are certainly defined in current TU or in some other
LTO partition. */
- if (DECL_WEAK (expr))
+ if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
{
struct cgraph_node *node;
if (!DECL_EXTERNAL (expr))
return false;
node = cgraph_function_node (cgraph_get_node (expr), NULL);
- if (node && node->symbol.in_other_partition)
+ if (node && node->in_other_partition)
return false;
return true;
}
/* Assume that accesses to weak vars may trap, unless we know
they are certainly defined in current TU or in some other
LTO partition. */
- if (DECL_WEAK (expr))
+ if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
{
- struct varpool_node *node;
+ varpool_node *node;
if (!DECL_EXTERNAL (expr))
return false;
node = varpool_variable_node (varpool_get_node (expr), NULL);
- if (node && node->symbol.in_other_partition)
+ if (node && node->in_other_partition)
return false;
return true;
}
}
}
-static unsigned
-refactor_eh (void)
+namespace {
+
+const pass_data pass_data_refactor_eh =
{
- refactor_eh_r (gimple_body (current_function_decl));
- return 0;
-}
+ GIMPLE_PASS, /* type */
+ "ehopt", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ true, /* has_execute */
+ TV_TREE_EH, /* tv_id */
+ PROP_gimple_lcf, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
-static bool
-gate_refactor_eh (void)
+class pass_refactor_eh : public gimple_opt_pass
{
- return flag_exceptions != 0;
-}
+public:
+ pass_refactor_eh (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_refactor_eh, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *) { return flag_exceptions != 0; }
+ virtual unsigned int execute (function *)
+ {
+ refactor_eh_r (gimple_body (current_function_decl));
+ return 0;
+ }
+
+}; // class pass_refactor_eh
+
+} // anon namespace
-struct gimple_opt_pass pass_refactor_eh =
+gimple_opt_pass *
+make_pass_refactor_eh (gcc::context *ctxt)
{
- {
- GIMPLE_PASS,
- "ehopt", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- gate_refactor_eh, /* gate */
- refactor_eh, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_TREE_EH, /* tv_id */
- PROP_gimple_lcf, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
-};
+ return new pass_refactor_eh (ctxt);
+}
\f
/* At the end of gimple optimization, we can lower RESX. */
return ret;
}
-static unsigned
-execute_lower_resx (void)
+namespace {
+
+/* Pass descriptor for the RESX lowering pass ("resx"). */
+
+const pass_data pass_data_lower_resx =
+{
+ GIMPLE_PASS, /* type */
+ "resx", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ true, /* has_execute */
+ TV_TREE_EH, /* tv_id */
+ PROP_gimple_lcf, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+
+class pass_lower_resx : public gimple_opt_pass
+{
+public:
+ pass_lower_resx (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_lower_resx, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ /* Nothing to lower unless exceptions are enabled. */
+ virtual bool gate (function *) { return flag_exceptions != 0; }
+ virtual unsigned int execute (function *);
+
+}; // class pass_lower_resx
+
+unsigned
+pass_lower_resx::execute (function *fun)
{
basic_block bb;
struct pointer_map_t *mnt_map;
mnt_map = pointer_map_create ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, fun)
{
gimple last = last_stmt (bb);
if (last && is_gimple_resx (last))
return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
}
-static bool
-gate_lower_resx (void)
-{
- return flag_exceptions != 0;
-}
+} // anon namespace
-struct gimple_opt_pass pass_lower_resx =
+gimple_opt_pass *
+make_pass_lower_resx (gcc::context *ctxt)
{
- {
- GIMPLE_PASS,
- "resx", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- gate_lower_resx, /* gate */
- execute_lower_resx, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_TREE_EH, /* tv_id */
- PROP_gimple_lcf, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_verify_flow /* todo_flags_finish */
- }
-};
+ return new pass_lower_resx (ctxt);
+}
/* Try to optimize var = {v} {CLOBBER} stmts followed just by
external throw. */
optimize_clobbers (basic_block bb)
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
+ bool any_clobbers = false;
+ bool seen_stack_restore = false;
+ edge_iterator ei;
+ edge e;
+
+ /* Only optimize anything if the bb contains at least one clobber,
+ ends with resx (checked by caller), optionally contains some
+ debug stmts or labels, or at most one __builtin_stack_restore
+ call, and has an incoming EH edge. */
for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
{
gimple stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt))
continue;
- if (!gimple_clobber_p (stmt)
- || TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
- return;
+ if (gimple_clobber_p (stmt))
+ {
+ any_clobbers = true;
+ continue;
+ }
+ if (!seen_stack_restore
+ && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
+ {
+ seen_stack_restore = true;
+ continue;
+ }
+ if (gimple_code (stmt) == GIMPLE_LABEL)
+ break;
+ return;
+ }
+ if (!any_clobbers)
+ return;
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ if (e->flags & EDGE_EH)
+ break;
+ if (e == NULL)
+ return;
+ gsi = gsi_last_bb (bb);
+ for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ if (!gimple_clobber_p (stmt))
+ continue;
unlink_stmt_vdef (stmt);
gsi_remove (&gsi, true);
release_defs (stmt);
gimple_stmt_iterator gsi, dgsi;
basic_block succbb;
bool any_clobbers = false;
+ unsigned todo = 0;
/* Only optimize if BB has a single EH successor and
all predecessor edges are EH too. */
continue;
if (gimple_code (stmt) == GIMPLE_LABEL)
break;
- if (!gimple_clobber_p (stmt)
- || TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
+ if (!gimple_clobber_p (stmt))
return 0;
any_clobbers = true;
}
if (!any_clobbers)
return 0;
- succbb = single_succ (bb);
+ edge succe = single_succ_edge (bb);
+ succbb = succe->dest;
+
+ /* See if there is a virtual PHI node to take an updated virtual
+ operand from. */
+ gimple vphi = NULL;
+ tree vuse = NULL_TREE;
+ for (gsi = gsi_start_phis (succbb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ tree res = gimple_phi_result (gsi_stmt (gsi));
+ if (virtual_operand_p (res))
+ {
+ vphi = gsi_stmt (gsi);
+ vuse = res;
+ break;
+ }
+ }
+
dgsi = gsi_after_labels (succbb);
gsi = gsi_last_bb (bb);
for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
{
gimple stmt = gsi_stmt (gsi);
+ tree lhs;
if (is_gimple_debug (stmt))
continue;
if (gimple_code (stmt) == GIMPLE_LABEL)
break;
- unlink_stmt_vdef (stmt);
+ lhs = gimple_assign_lhs (stmt);
+ /* Unfortunately we don't have dominance info updated at this
+ point, so checking if
+ dominated_by_p (CDI_DOMINATORS, succbb,
+ gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
+ would be too costly. Thus, avoid sinking any clobbers that
+ refer to non-(D) SSA_NAMEs. */
+ if (TREE_CODE (lhs) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
+ && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
+ {
+ unlink_stmt_vdef (stmt);
+ gsi_remove (&gsi, true);
+ release_defs (stmt);
+ continue;
+ }
+
+ /* As we do not change stmt order when sinking across a
+ forwarder edge we can keep virtual operands in place. */
gsi_remove (&gsi, false);
- /* Trigger the operand scanner to cause renaming for virtual
- operands for this statement.
- ??? Given the simple structure of this code manually
- figuring out the reaching definition should not be too hard. */
- if (gimple_vuse (stmt))
- gimple_set_vuse (stmt, NULL_TREE);
- gsi_insert_before (&dgsi, stmt, GSI_SAME_STMT);
+ gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
+
+ /* But adjust virtual operands if we sunk across a PHI node. */
+ if (vuse)
+ {
+ gimple use_stmt;
+ imm_use_iterator iter;
+ use_operand_p use_p;
+ FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+ SET_USE (use_p, gimple_vdef (stmt));
+ if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
+ {
+ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
+ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
+ }
+ /* Adjust the incoming virtual operand. */
+ SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
+ SET_USE (gimple_vuse_op (stmt), vuse);
+ }
+ /* If there isn't a single predecessor but no virtual PHI node
+ arrange for virtual operands to be renamed. */
+ else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
+ && !single_pred_p (succbb))
+ {
+ /* In this case there will be no use of the VDEF of this stmt.
+ ??? Unless this is a secondary opportunity and we have not
+ removed unreachable blocks yet, so we cannot assert this.
+ Which also means we will end up renaming too many times. */
+ SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
+ mark_virtual_operands_for_renaming (cfun);
+ todo |= TODO_update_ssa_only_virtuals;
+ }
}
- return TODO_update_ssa_only_virtuals;
+ return todo;
}
/* At the end of inlining, we can lower EH_DISPATCH. Return true when
{
case ERT_TRY:
{
- vec<tree> labels = vNULL;
+ auto_vec<tree> labels;
tree default_label = NULL;
eh_catch c;
edge_iterator ei;
x = gimple_build_switch (filter, default_label, labels);
gsi_insert_before (&gsi, x, GSI_SAME_STMT);
-
- labels.release ();
}
pointer_set_destroy (seen_values);
}
return redirected;
}
-static unsigned
-execute_lower_eh_dispatch (void)
+namespace {
+
+/* Pass descriptor for the EH_DISPATCH lowering pass ("ehdisp"). */
+
+const pass_data pass_data_lower_eh_dispatch =
+{
+ GIMPLE_PASS, /* type */
+ "ehdisp", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ true, /* has_execute */
+ TV_TREE_EH, /* tv_id */
+ PROP_gimple_lcf, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+
+class pass_lower_eh_dispatch : public gimple_opt_pass
+{
+public:
+ pass_lower_eh_dispatch (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ /* Skip functions that have no EH region tree. */
+ virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
+ virtual unsigned int execute (function *);
+
+}; // class pass_lower_eh_dispatch
+
+unsigned
+pass_lower_eh_dispatch::execute (function *fun)
{
basic_block bb;
int flags = 0;
assign_filter_values ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, fun)
{
gimple last = last_stmt (bb);
if (last == NULL)
return flags;
}
-static bool
-gate_lower_eh_dispatch (void)
-{
- return cfun->eh->region_tree != NULL;
-}
+} // anon namespace
-struct gimple_opt_pass pass_lower_eh_dispatch =
+gimple_opt_pass *
+make_pass_lower_eh_dispatch (gcc::context *ctxt)
{
- {
- GIMPLE_PASS,
- "ehdisp", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- gate_lower_eh_dispatch, /* gate */
- execute_lower_eh_dispatch, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_TREE_EH, /* tv_id */
- PROP_gimple_lcf, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_verify_flow /* todo_flags_finish */
- }
-};
+ return new pass_lower_eh_dispatch (ctxt);
+}
\f
/* Walk statements, see what regions and, optionally, landing pads
are really referenced.
else
lp_reachable = NULL;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
edge e_in, e_out;
/* Quickly check the edge counts on BB for singularity. */
- if (EDGE_COUNT (bb->preds) != 1 || EDGE_COUNT (bb->succs) != 1)
+ if (!single_pred_p (bb) || !single_succ_p (bb))
return false;
- e_in = EDGE_PRED (bb, 0);
- e_out = EDGE_SUCC (bb, 0);
+ e_in = single_pred_edge (bb);
+ e_out = single_succ_edge (bb);
/* Input edge must be EH and output edge must be normal. */
if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
gimple_stmt_iterator ngsi, ogsi;
edge_iterator ei;
edge e;
- bitmap rename_virts;
bitmap ophi_handled;
/* The destination block must not be a regular successor for any
redirect_edge_var_map_clear (e);
ophi_handled = BITMAP_ALLOC (NULL);
- rename_virts = BITMAP_ALLOC (NULL);
/* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
for the edges we're going to move. */
redirect_edge_var_map_add (e, nresult, oop, oloc);
}
}
- /* If we didn't find the PHI, but it's a VOP, remember to rename
- it later, assuming all other tests succeed. */
- else if (virtual_operand_p (nresult))
- bitmap_set_bit (rename_virts, SSA_NAME_VERSION (nresult));
- /* If we didn't find the PHI, and it's a real variable, we know
+ /* If we didn't find the PHI, whether it's a real variable or a VOP, we know
from the fact that OLD_BB is tree_empty_eh_handler_p that the
variable is unchanged from input to the block and we can simply
re-use the input to NEW_BB from the OLD_BB_OUT edge. */
goto fail;
}
- /* At this point we know that the merge will succeed. Remove the PHI
- nodes for the virtuals that we want to rename. */
- if (!bitmap_empty_p (rename_virts))
- {
- for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); )
- {
- gimple nphi = gsi_stmt (ngsi);
- tree nresult = gimple_phi_result (nphi);
- if (bitmap_bit_p (rename_virts, SSA_NAME_VERSION (nresult)))
- {
- mark_virtual_phi_result_for_renaming (nphi);
- remove_phi_node (&ngsi, true);
- }
- else
- gsi_next (&ngsi);
- }
- }
-
/* Finally, move the edges and update the PHIs. */
for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
if (e->flags & EDGE_EH)
ei_next (&ei);
BITMAP_FREE (ophi_handled);
- BITMAP_FREE (rename_virts);
return true;
fail:
FOR_EACH_EDGE (e, ei, old_bb->preds)
redirect_edge_var_map_clear (e);
BITMAP_FREE (ophi_handled);
- BITMAP_FREE (rename_virts);
return false;
}
e_out = NULL;
break;
case 1:
- e_out = EDGE_SUCC (bb, 0);
+ e_out = single_succ_edge (bb);
break;
default:
return false;
/* If the block is totally empty, look for more unsplitting cases. */
if (gsi_end_p (gsi))
{
- /* For the degenerate case of an infinite loop bail out. */
- if (infinite_empty_loop_p (e_out))
+ /* For the degenerate case of an infinite loop bail out.
+ If bb has no successors and is totally empty, which can happen e.g.
+ because of an incorrect noreturn attribute, bail out too. */
+ if (e_out == NULL
+ || infinite_empty_loop_p (e_out))
return ret;
return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
3) Info about regions that are containing instructions, and regions
reachable via local EH edges is collected
- 4) Eh tree is pruned for regions no longer neccesary.
+ 4) Eh tree is pruned for regions no longer necessary.
TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
Unify those that have the same failure decl and locus.
remove_unreachable_handlers ();
/* Watch out for the region tree vanishing due to all unreachable. */
- if (cfun->eh->region_tree && optimize)
+ if (cfun->eh->region_tree)
{
bool changed = false;
- changed |= unsplit_all_eh ();
+ if (optimize)
+ changed |= unsplit_all_eh ();
changed |= cleanup_all_empty_eh ();
if (changed)
return 0;
}
-static unsigned int
-execute_cleanup_eh (void)
+namespace {
+
+/* Pass descriptor for the EH cleanup pass ("ehcleanup"). */
+
+const pass_data pass_data_cleanup_eh =
+{
+ GIMPLE_PASS, /* type */
+ "ehcleanup", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ true, /* has_execute */
+ TV_TREE_EH, /* tv_id */
+ PROP_gimple_lcf, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+
+class pass_cleanup_eh : public gimple_opt_pass
+{
+public:
+ pass_cleanup_eh (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ /* clone () lets the pass manager schedule this pass more than once. */
+ opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
+ /* Skip functions that have no EH data or no EH regions at all. */
+ virtual bool gate (function *fun)
+ {
+ return fun->eh != NULL && fun->eh->region_tree != NULL;
+ }
+
+ virtual unsigned int execute (function *);
+
+}; // class pass_cleanup_eh
+
+unsigned int
+pass_cleanup_eh::execute (function *fun)
{
int ret = execute_cleanup_eh_1 ();
clear it. This exposes cross-language inlining opportunities
and avoids references to a never defined personality routine. */
if (DECL_FUNCTION_PERSONALITY (current_function_decl)
- && function_needs_eh_personality (cfun) != eh_personality_lang)
+ && function_needs_eh_personality (fun) != eh_personality_lang)
DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
return ret;
}
-static bool
-gate_cleanup_eh (void)
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_cleanup_eh (gcc::context *ctxt)
{
- return cfun->eh != NULL && cfun->eh->region_tree != NULL;
+ return new pass_cleanup_eh (ctxt);
}
-
-struct gimple_opt_pass pass_cleanup_eh = {
- {
- GIMPLE_PASS,
- "ehcleanup", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- gate_cleanup_eh, /* gate */
- execute_cleanup_eh, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_TREE_EH, /* tv_id */
- PROP_gimple_lcf, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
-};
\f
/* Verify that BB containing STMT as the last statement, has precisely the
edge that make_eh_edges would create. */