/* Conditional Dead Call Elimination pass for the GNU compiler.
- Copyright (C) 2008, 2009, 2010
- Free Software Foundation, Inc.
+ Copyright (C) 2008-2016 Free Software Foundation, Inc.
Contributed by Xinliang David Li <davidxl@google.com>
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
-#include "basic-block.h"
+#include "backend.h"
#include "tree.h"
-#include "gimple-pretty-print.h"
-#include "tree-flow.h"
#include "gimple.h"
+#include "cfghooks.h"
#include "tree-pass.h"
-#include "flags.h"
+#include "ssa.h"
+#include "gimple-pretty-print.h"
+#include "fold-const.h"
+#include "stor-layout.h"
+#include "gimple-iterator.h"
+#include "tree-cfg.h"
+#include "tree-into-ssa.h"
+#include "builtins.h"
+#include "internal-fn.h"
\f
-/* Conditional dead call elimination
+/* This pass serves two closely-related purposes:
+
+ 1. It conditionally executes calls that set errno if (a) the result of
+ the call is unused and (b) a simple range check on the arguments can
+ detect most cases where errno does not need to be set.
+
+ This is the "conditional dead-code elimination" that gave the pass
+ its original name, since the call is dead for most argument values.
+ The calls for which it helps are usually part of the C++ abstraction
+ penalty exposed after inlining.
+
+ 2. It looks for calls to built-in functions that set errno and whose
+ result is used. It checks whether there is an associated internal
+ function that doesn't set errno and whether the target supports
+ that internal function. If so, the pass uses the internal function
+ to compute the result of the built-in function but still arranges
+ for errno to be set when necessary. There are two ways of setting
+ errno:
+
+ a. by protecting the original call with the same argument checks as (1)
+
+ b. by protecting the original call with a check that the result
+ of the internal function is not equal to itself (i.e. is NaN).
+
+ (b) requires that NaNs are the only erroneous results. It is not
+ appropriate for functions like log, which sets errno to ERANGE for
+ zero arguments. (b) is also likely to perform worse than (a) because it
+ requires the result to be calculated first. The pass therefore uses
+ (a) when it can and uses (b) as a fallback.
- Some builtin functions can set errno on error conditions, but they
- are otherwise pure. If the result of a call to such a function is
- not used, the compiler can still not eliminate the call without
- powerful interprocedural analysis to prove that the errno is not
- checked. However, if the conditions under which the error occurs
- are known, the compiler can conditionally dead code eliminate the
- calls by shrink-wrapping the semi-dead calls into the error condition:
+ For (b) the pass can replace the original call with a call to
+ IFN_SET_EDOM, if the target supports direct assignments to errno.
- built_in_call (args)
- ==>
- if (error_cond (args))
- built_in_call (args)
+ In both cases, arguments that require errno to be set should occur
+ rarely in practice. Checks of the errno result should also be rare,
+ but the compiler would need powerful interprocedural analysis to
+ prove that errno is not checked. It's much easier to add argument
+ checks or result checks instead.
- An actual simple example is :
- log (x); // Mostly dead call
+ An example of (1) is:
+
+ log (x); // Mostly dead call
==>
- if (x < 0)
- log (x);
+ if (__builtin_islessequal (x, 0))
+ log (x);
+
With this change, call to log (x) is effectively eliminated, as
- in majority of the cases, log won't be called with x out of
+ in the majority of the cases, log won't be called with x out of
range. The branch is totally predictable, so the branch cost
is low.
+ An example of (2) is:
+
+ y = sqrt (x);
+ ==>
+ y = IFN_SQRT (x);
+ if (__builtin_isless (x, 0))
+ sqrt (x);
+
+ In the vast majority of cases we should then never need to call sqrt.
+
Note that library functions are not supposed to clear errno to zero without
error. See IEEE Std 1003.1, section 2.3 Error Numbers, and section 7.5:3 of
ISO/IEC 9899 (C99).
The condition wrapping the builtin call is conservatively set to avoid too
- aggressive (wrong) shrink wrapping. The optimization is called conditional
- dead call elimination because the call is eliminated under the condition
- that the input arguments would not lead to domain or range error (for
- instance when x <= 0 for a log (x) call), however the chances that the error
- condition is hit is very low (those builtin calls which are conditionally
- dead are usually part of the C++ abstraction penalty exposed after
- inlining). */
+ aggressive (wrong) shrink wrapping. */
/* A structure for representing input domain of
to indicate if lb and ub value are inclusive
respectively. */
-typedef struct input_domain
+struct inp_domain
{
int lb;
int ub;
bool has_ub;
bool is_lb_inclusive;
bool is_ub_inclusive;
-} inp_domain;
+};
/* A helper function to construct and return an input
domain object. LB is the lower bound, HAS_LB is
check_target_format (tree arg)
{
tree type;
- enum machine_mode mode;
+ machine_mode mode;
const struct real_format *rfmt;
type = TREE_TYPE (arg);
#define MAX_BASE_INT_BIT_SIZE 32
static bool
-check_pow (gimple pow_call)
+check_pow (gcall *pow_call)
{
tree base, expn;
enum tree_code bc, ec;
/* Only handle a fixed range of constant. */
REAL_VALUE_TYPE mv;
REAL_VALUE_TYPE bcv = TREE_REAL_CST (base);
- if (REAL_VALUES_EQUAL (bcv, dconst1))
+ if (real_equal (&bcv, &dconst1))
return false;
- if (REAL_VALUES_LESS (bcv, dconst1))
+ if (real_less (&bcv, &dconst1))
return false;
- real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, 0, 1);
- if (REAL_VALUES_LESS (mv, bcv))
+ real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, UNSIGNED);
+ if (real_less (&mv, &bcv))
return false;
return true;
}
else if (bc == SSA_NAME)
{
tree base_val0, type;
- gimple base_def;
+ gimple *base_def;
int bit_sz;
/* Only handles cases where base value is converted
Returns true if the function call is a candidate. */
static bool
-check_builtin_call (gimple bcall)
+check_builtin_call (gcall *bcall)
{
tree arg;
return check_target_format (arg);
}
-/* A helper function to determine if a builtin function call is a
- candidate for conditional DCE. Returns true if the builtin call
- is a candidate. */
+/* Return true if built-in function call CALL calls a math function
+ and if we know how to test the range of its arguments to detect _most_
+ situations in which errno is not set. The test must err on the side
+ of treating non-erroneous values as potentially erroneous. */
static bool
-is_call_dce_candidate (gimple call)
+can_test_argument_range (gcall *call)
{
- tree fn;
- enum built_in_function fnc;
-
- /* Only potentially dead calls are considered. */
- if (gimple_call_lhs (call))
- return false;
-
- fn = gimple_call_fndecl (call);
- if (!fn
- || !DECL_BUILT_IN (fn)
- || (DECL_BUILT_IN_CLASS (fn) != BUILT_IN_NORMAL))
- return false;
-
- fnc = DECL_FUNCTION_CODE (fn);
- switch (fnc)
+ switch (DECL_FUNCTION_CODE (gimple_call_fndecl (call)))
{
/* Trig functions. */
CASE_FLT_FN (BUILT_IN_ACOS):
return false;
}
+/* Return true if CALL can produce a domain error (EDOM) but can never
+ produce a pole, range overflow or range underflow error (all ERANGE).
+ This means that we can tell whether a function would have set errno
+ by testing whether the result is a NaN. */
+
+static bool
+edom_only_function (gcall *call)
+{
+ switch (DECL_FUNCTION_CODE (gimple_call_fndecl (call)))
+ {
+ CASE_FLT_FN (BUILT_IN_ACOS):
+ CASE_FLT_FN (BUILT_IN_ASIN):
+ CASE_FLT_FN (BUILT_IN_ATAN):
+ CASE_FLT_FN (BUILT_IN_COS):
+ CASE_FLT_FN (BUILT_IN_SIGNIFICAND):
+ CASE_FLT_FN (BUILT_IN_SIN):
+ CASE_FLT_FN (BUILT_IN_SQRT):
+ CASE_FLT_FN (BUILT_IN_FMOD):
+ CASE_FLT_FN (BUILT_IN_REMAINDER):
+ return true;
+
+ default:
+ return false;
+ }
+}
\f
-/* A helper function to generate gimple statements for
- one bound comparison. ARG is the call argument to
- be compared with the bound, LBUB is the bound value
- in integer, TCODE is the tree_code of the comparison,
- TEMP_NAME1/TEMP_NAME2 are names of the temporaries,
- CONDS is a vector holding the produced GIMPLE statements,
- and NCONDS points to the variable holding the number
- of logical comparisons. CONDS is either empty or
- a list ended with a null tree. */
+/* A helper function to generate gimple statements for one bound
+ comparison, so that the built-in function is called whenever
+ TCODE <ARG, LBUB> is *false*. TEMP_NAME1/TEMP_NAME2 are names
+ of the temporaries, CONDS is a vector holding the produced GIMPLE
+ statements, and NCONDS points to the variable holding the number of
+ logical comparisons. CONDS is either empty or a list ended with a
+ null tree. */
static void
gen_one_condition (tree arg, int lbub,
enum tree_code tcode,
const char *temp_name1,
const char *temp_name2,
- VEC (gimple, heap) *conds,
+ vec<gimple *> conds,
unsigned *nconds)
{
tree lbub_real_cst, lbub_cst, float_type;
tree temp, tempn, tempc, tempcn;
- gimple stmt1, stmt2, stmt3;
+ gassign *stmt1;
+ gassign *stmt2;
+ gcond *stmt3;
float_type = TREE_TYPE (arg);
lbub_cst = build_int_cst (integer_type_node, lbub);
gimple_assign_set_lhs (stmt2, tempcn);
stmt3 = gimple_build_cond_from_tree (tempcn, NULL_TREE, NULL_TREE);
- VEC_quick_push (gimple, conds, stmt1);
- VEC_quick_push (gimple, conds, stmt2);
- VEC_quick_push (gimple, conds, stmt3);
+ conds.quick_push (stmt1);
+ conds.quick_push (stmt2);
+ conds.quick_push (stmt3);
(*nconds)++;
}
static void
gen_conditions_for_domain (tree arg, inp_domain domain,
- VEC (gimple, heap) *conds,
+ vec<gimple *> conds,
unsigned *nconds)
{
if (domain.has_lb)
gen_one_condition (arg, domain.lb,
(domain.is_lb_inclusive
- ? LT_EXPR : LE_EXPR),
+ ? UNGE_EXPR : UNGT_EXPR),
"DCE_COND_LB", "DCE_COND_LB_TEST",
conds, nconds);
{
/* Now push a separator. */
if (domain.has_lb)
- VEC_quick_push (gimple, conds, NULL);
+ conds.quick_push (NULL);
gen_one_condition (arg, domain.ub,
(domain.is_ub_inclusive
- ? GT_EXPR : GE_EXPR),
+ ? UNLE_EXPR : UNLT_EXPR),
"DCE_COND_UB", "DCE_COND_UB_TEST",
conds, nconds);
}
See candidate selection in check_pow. Since the
candidates' base values have a limited range,
the guarded code generated for y are simple:
- if (y > max_y)
+ if (__builtin_isgreater (y, max_y))
pow (const, y);
Note max_y can be computed separately for each
const base, but in this implementation, we
static void
gen_conditions_for_pow_cst_base (tree base, tree expn,
- VEC (gimple, heap) *conds,
+ vec<gimple *> conds,
unsigned *nconds)
{
inp_domain exp_domain;
sure it is consistent with check_pow. */
REAL_VALUE_TYPE mv;
REAL_VALUE_TYPE bcv = TREE_REAL_CST (base);
- gcc_assert (!REAL_VALUES_EQUAL (bcv, dconst1)
- && !REAL_VALUES_LESS (bcv, dconst1));
- real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, 0, 1);
- gcc_assert (!REAL_VALUES_LESS (mv, bcv));
+ gcc_assert (!real_equal (&bcv, &dconst1)
+ && !real_less (&bcv, &dconst1));
+ real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, UNSIGNED);
+ gcc_assert (!real_less (&mv, &bcv));
exp_domain = get_domain (0, false, false,
127, true, false);
static void
gen_conditions_for_pow_int_base (tree base, tree expn,
- VEC (gimple, heap) *conds,
+ vec<gimple *> conds,
unsigned *nconds)
{
- gimple base_def;
+ gimple *base_def;
tree base_val0;
tree int_type;
tree temp, tempn;
tree cst0;
- gimple stmt1, stmt2;
+ gimple *stmt1, *stmt2;
int bit_sz, max_exp;
inp_domain exp_domain;
/* For pow ((double)x, y), generate the following conditions:
cond 1:
temp1 = x;
- if (temp1 <= 0)
+ if (__builtin_islessequal (temp1, 0))
cond 2:
temp2 = y;
- if (temp2 > max_exp_real_cst) */
+ if (__builtin_isgreater (temp2, max_exp_real_cst)) */
/* Generate condition in reverse order -- first
the condition for the exp argument. */
type is integer. */
/* Push a separator. */
- VEC_quick_push (gimple, conds, NULL);
+ conds.quick_push (NULL);
temp = create_tmp_var (int_type, "DCE_COND1");
cst0 = build_int_cst (int_type, 0);
stmt1 = gimple_build_assign (temp, base_val0);
tempn = make_ssa_name (temp, stmt1);
gimple_assign_set_lhs (stmt1, tempn);
- stmt2 = gimple_build_cond (LE_EXPR, tempn, cst0, NULL_TREE, NULL_TREE);
+ stmt2 = gimple_build_cond (GT_EXPR, tempn, cst0, NULL_TREE, NULL_TREE);
- VEC_quick_push (gimple, conds, stmt1);
- VEC_quick_push (gimple, conds, stmt2);
+ conds.quick_push (stmt1);
+ conds.quick_push (stmt2);
(*nconds)++;
}
/* Method to generate conditional statements for guarding conditionally
dead calls to pow. One or more statements can be generated for
each logical condition. Statement groups of different conditions
- are separated by a NULL tree and they are stored in the VEC
+ are separated by a NULL tree and they are stored in the vec
conds. The number of logical conditions are stored in *nconds.
See C99 standard, 7.12.7.4:2, for description of pow (x, y).
and *NCONDS is the number of logical conditions. */
static void
-gen_conditions_for_pow (gimple pow_call, VEC (gimple, heap) *conds,
+gen_conditions_for_pow (gcall *pow_call, vec<gimple *> conds,
unsigned *nconds)
{
tree base, expn;
condition are separated by NULL tree in the vector. */
static void
-gen_shrink_wrap_conditions (gimple bi_call, VEC (gimple, heap) *conds,
+gen_shrink_wrap_conditions (gcall *bi_call, vec<gimple *> conds,
unsigned int *nconds)
{
- gimple call;
+ gcall *call;
tree fn;
enum built_in_function fnc;
- gcc_assert (nconds && conds);
- gcc_assert (VEC_length (gimple, conds) == 0);
+ gcc_assert (nconds && conds.exists ());
+ gcc_assert (conds.length () == 0);
gcc_assert (is_gimple_call (bi_call));
call = bi_call;
/* Probability of the branch (to the call) is taken. */
#define ERR_PROB 0.01
-/* The function to shrink wrap a partially dead builtin call
- whose return value is not used anywhere, but has to be kept
- live due to potential error condition. Returns true if the
- transformation actually happens. */
+/* Shrink-wrap BI_CALL so that it is only called when one of the NCONDS
+ conditions in CONDS is false.
+
+ Return true on success, in which case the cfg will have been updated. */
static bool
-shrink_wrap_one_built_in_call (gimple bi_call)
+shrink_wrap_one_built_in_call_with_conds (gcall *bi_call, vec <gimple *> conds,
+ unsigned int nconds)
{
gimple_stmt_iterator bi_call_bsi;
- basic_block bi_call_bb, join_tgt_bb, guard_bb, guard_bb0;
+ basic_block bi_call_bb, join_tgt_bb, guard_bb;
edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru;
edge bi_call_in_edge0, guard_bb_in_edge;
- VEC (gimple, heap) *conds;
- unsigned tn_cond_stmts, nconds;
+ unsigned tn_cond_stmts;
unsigned ci;
- gimple cond_expr = NULL;
- gimple cond_expr_start;
- tree bi_call_label_decl;
- gimple bi_call_label;
-
- conds = VEC_alloc (gimple, heap, 12);
- gen_shrink_wrap_conditions (bi_call, conds, &nconds);
-
- /* This can happen if the condition generator decides
- it is not beneficial to do the transformation. Just
- return false and do not do any transformation for
- the call. */
- if (nconds == 0)
- return false;
-
+ gimple *cond_expr = NULL;
+ gimple *cond_expr_start;
+
+ /* The cfg we want to create looks like this:
+
+ [guard n-1] <- guard_bb (old block)
+ | \
+ | [guard n-2] }
+ | / \ }
+ | / ... } new blocks
+ | / [guard 0] }
+ | / / | }
+ [ call ] | <- bi_call_bb }
+ | \ |
+ | \ |
+ | [ join ] <- join_tgt_bb (old iff call must end bb)
+ |
+ possible EH edges (only if [join] is old)
+
+ When [join] is new, the immediate dominators for these blocks are:
+
+ 1. [guard n-1]: unchanged
+ 2. [call]: [guard n-1]
+ 3. [guard m]: [guard m+1] for 0 <= m <= n-2
+ 4. [join]: [guard n-1]
+
+ We punt for the more complex case of [join] being old and
+ simply free the dominance info. We also punt on postdominators,
+ which aren't expected to be available at this point anyway. */
bi_call_bb = gimple_bb (bi_call);
- /* Now find the join target bb -- split
- bi_call_bb if needed. */
- bi_call_bsi = gsi_for_stmt (bi_call);
+ /* Now find the join target bb -- split bi_call_bb if needed. */
+ if (stmt_ends_bb_p (bi_call))
+ {
+ /* If the call must be the last in the bb, don't split the block,
+ it could e.g. have EH edges. */
+ join_tgt_in_edge_from_call = find_fallthru_edge (bi_call_bb->succs);
+ if (join_tgt_in_edge_from_call == NULL)
+ return false;
+ free_dominance_info (CDI_DOMINATORS);
+ }
+ else
+ join_tgt_in_edge_from_call = split_block (bi_call_bb, bi_call);
- join_tgt_in_edge_from_call = split_block (bi_call_bb, bi_call);
bi_call_bsi = gsi_for_stmt (bi_call);
join_tgt_bb = join_tgt_in_edge_from_call->dest;
/* Now it is time to insert the first conditional expression
into bi_call_bb and split this bb so that bi_call is
shrink-wrapped. */
- tn_cond_stmts = VEC_length (gimple, conds);
+ tn_cond_stmts = conds.length ();
cond_expr = NULL;
- cond_expr_start = VEC_index (gimple, conds, 0);
+ cond_expr_start = conds[0];
for (ci = 0; ci < tn_cond_stmts; ci++)
{
- gimple c = VEC_index (gimple, conds, ci);
+ gimple *c = conds[ci];
gcc_assert (c || ci != 0);
if (!c)
break;
ci++;
gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);
- /* Now the label. */
- bi_call_label_decl = create_artificial_label (gimple_location (bi_call));
- bi_call_label = gimple_build_label (bi_call_label_decl);
- gsi_insert_before (&bi_call_bsi, bi_call_label, GSI_SAME_STMT);
-
bi_call_in_edge0 = split_block (bi_call_bb, cond_expr);
bi_call_in_edge0->flags &= ~EDGE_FALLTHRU;
- bi_call_in_edge0->flags |= EDGE_TRUE_VALUE;
- guard_bb0 = bi_call_bb;
+ bi_call_in_edge0->flags |= EDGE_FALSE_VALUE;
+ guard_bb = bi_call_bb;
bi_call_bb = bi_call_in_edge0->dest;
- join_tgt_in_edge_fall_thru = make_edge (guard_bb0, join_tgt_bb,
- EDGE_FALSE_VALUE);
+ join_tgt_in_edge_fall_thru = make_edge (guard_bb, join_tgt_bb,
+ EDGE_TRUE_VALUE);
bi_call_in_edge0->probability = REG_BR_PROB_BASE * ERR_PROB;
bi_call_in_edge0->count =
- apply_probability (guard_bb0->count,
+ apply_probability (guard_bb->count,
bi_call_in_edge0->probability);
join_tgt_in_edge_fall_thru->probability =
inverse_probability (bi_call_in_edge0->probability);
join_tgt_in_edge_fall_thru->count =
- guard_bb0->count - bi_call_in_edge0->count;
+ guard_bb->count - bi_call_in_edge0->count;
/* Code generation for the rest of the conditions */
- guard_bb = guard_bb0;
while (nconds > 0)
{
unsigned ci0;
edge bi_call_in_edge;
gimple_stmt_iterator guard_bsi = gsi_for_stmt (cond_expr_start);
ci0 = ci;
- cond_expr_start = VEC_index (gimple, conds, ci0);
+ cond_expr_start = conds[ci0];
for (; ci < tn_cond_stmts; ci++)
{
- gimple c = VEC_index (gimple, conds, ci);
+ gimple *c = conds[ci];
gcc_assert (c || ci != ci0);
if (!c)
break;
gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);
guard_bb_in_edge = split_block (guard_bb, cond_expr);
guard_bb_in_edge->flags &= ~EDGE_FALLTHRU;
- guard_bb_in_edge->flags |= EDGE_FALSE_VALUE;
+ guard_bb_in_edge->flags |= EDGE_TRUE_VALUE;
- bi_call_in_edge = make_edge (guard_bb, bi_call_bb, EDGE_TRUE_VALUE);
+ bi_call_in_edge = make_edge (guard_bb, bi_call_bb, EDGE_FALSE_VALUE);
bi_call_in_edge->probability = REG_BR_PROB_BASE * ERR_PROB;
bi_call_in_edge->count =
guard_bb_in_edge->count = guard_bb->count - bi_call_in_edge->count;
}
- VEC_free (gimple, heap, conds);
+ if (dom_info_available_p (CDI_DOMINATORS))
+ {
+ /* The split_blocks leave [guard 0] as the immediate dominator
+ of [call] and [call] as the immediate dominator of [join].
+ Fix them up. */
+ set_immediate_dominator (CDI_DOMINATORS, bi_call_bb, guard_bb);
+ set_immediate_dominator (CDI_DOMINATORS, join_tgt_bb, guard_bb);
+ }
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
location_t loc;
return true;
}
+/* Shrink-wrap BI_CALL so that it is only called when it might set errno
+ (but is always called if it would set errno).
+
+ Return true on success, in which case the cfg will have been updated. */
+
+static bool
+shrink_wrap_one_built_in_call (gcall *bi_call)
+{
+ unsigned nconds = 0;
+ auto_vec<gimple *, 12> conds;
+ gen_shrink_wrap_conditions (bi_call, conds, &nconds);
+ /* This can happen if the condition generator decides
+ it is not beneficial to do the transformation. Just
+ return false and do not do any transformation for
+ the call. */
+ if (nconds == 0)
+ return false;
+ return shrink_wrap_one_built_in_call_with_conds (bi_call, conds, nconds);
+}
+
+/* Return true if built-in function call CALL could be implemented using
+ a combination of an internal function to compute the result and a
+ separate call to set errno. */
+
+static bool
+can_use_internal_fn (gcall *call)
+{
+ /* Only replace calls that set errno. */
+ if (!gimple_vdef (call))
+ return false;
+
+ /* Punt if we can't conditionalize the call. */
+ basic_block bb = gimple_bb (call);
+ if (stmt_ends_bb_p (call) && !find_fallthru_edge (bb->succs))
+ return false;
+
+ /* See whether there is an internal function for this built-in. */
+ if (replacement_internal_fn (call) == IFN_LAST)
+ return false;
+
+ /* See whether we can catch all cases where errno would be set,
+ while still avoiding the call in most cases. */
+ if (!can_test_argument_range (call)
+ && !edom_only_function (call))
+ return false;
+
+ return true;
+}
+
+/* Implement built-in function call CALL using an internal function.
+ Return true on success, in which case the cfg will have changed. */
+
+static bool
+use_internal_fn (gcall *call)
+{
+ unsigned nconds = 0;
+ auto_vec<gimple *, 12> conds;
+ if (can_test_argument_range (call))
+ gen_shrink_wrap_conditions (call, conds, &nconds);
+ if (nconds == 0 && !edom_only_function (call))
+ return false;
+
+ internal_fn ifn = replacement_internal_fn (call);
+ gcc_assert (ifn != IFN_LAST);
+
+ /* Construct the new call, with the same arguments as the original one. */
+ auto_vec <tree, 16> args;
+ unsigned int nargs = gimple_call_num_args (call);
+ for (unsigned int i = 0; i < nargs; ++i)
+ args.safe_push (gimple_call_arg (call, i));
+ gcall *new_call = gimple_build_call_internal_vec (ifn, args);
+ gimple_set_location (new_call, gimple_location (call));
+
+ /* Transfer the LHS to the new call. */
+ tree lhs = gimple_call_lhs (call);
+ gimple_call_set_lhs (new_call, lhs);
+ gimple_call_set_lhs (call, NULL_TREE);
+ SSA_NAME_DEF_STMT (lhs) = new_call;
+
+ /* Insert the new call. */
+ gimple_stmt_iterator gsi = gsi_for_stmt (call);
+ gsi_insert_before (&gsi, new_call, GSI_SAME_STMT);
+
+ if (nconds == 0)
+ {
+ /* Skip the call if LHS == LHS. If we reach here, EDOM is the only
+ valid errno value and it is used iff the result is NaN. */
+ conds.quick_push (gimple_build_cond (EQ_EXPR, lhs, lhs,
+ NULL_TREE, NULL_TREE));
+ nconds++;
+
+ /* Try replacing the original call with a direct assignment to
+ errno, via an internal function. */
+ if (set_edom_supported_p () && !stmt_ends_bb_p (call))
+ {
+ gimple_stmt_iterator gsi = gsi_for_stmt (call);
+ gcall *new_call = gimple_build_call_internal (IFN_SET_EDOM, 0);
+ gimple_set_vuse (new_call, gimple_vuse (call));
+ gimple_set_vdef (new_call, gimple_vdef (call));
+ SSA_NAME_DEF_STMT (gimple_vdef (new_call)) = new_call;
+ gimple_set_location (new_call, gimple_location (call));
+ gsi_replace (&gsi, new_call, false);
+ call = new_call;
+ }
+ }
+
+ if (!shrink_wrap_one_built_in_call_with_conds (call, conds, nconds))
+ /* It's too late to back out now. */
+ gcc_unreachable ();
+ return true;
+}
+
/* The top level function for conditional dead code shrink
wrapping transformation. */
static bool
-shrink_wrap_conditional_dead_built_in_calls (VEC (gimple, heap) *calls)
+shrink_wrap_conditional_dead_built_in_calls (vec<gcall *> calls)
{
bool changed = false;
unsigned i = 0;
- unsigned n = VEC_length (gimple, calls);
+ unsigned n = calls.length ();
if (n == 0)
return false;
for (; i < n ; i++)
{
- gimple bi_call = VEC_index (gimple, calls, i);
- changed |= shrink_wrap_one_built_in_call (bi_call);
+ gcall *bi_call = calls[i];
+ if (gimple_call_lhs (bi_call))
+ changed |= use_internal_fn (bi_call);
+ else
+ changed |= shrink_wrap_one_built_in_call (bi_call);
}
return changed;
}
-/* Pass entry points. */
+namespace {
+
+const pass_data pass_data_call_cdce =
+{
+ GIMPLE_PASS, /* type */
+ "cdce", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_TREE_CALL_CDCE, /* tv_id */
+ ( PROP_cfg | PROP_ssa ), /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
-static unsigned int
-tree_call_cdce (void)
+class pass_call_cdce : public gimple_opt_pass
+{
+public:
+ pass_call_cdce (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_call_cdce, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ /* The limit constants used in the implementation
+ assume IEEE floating point format. Other formats
+ can be supported in the future if needed. */
+ return flag_tree_builtin_call_dce != 0;
+ }
+
+ virtual unsigned int execute (function *);
+
+}; // class pass_call_cdce
+
+unsigned int
+pass_call_cdce::execute (function *fun)
{
basic_block bb;
gimple_stmt_iterator i;
bool something_changed = false;
- VEC (gimple, heap) *cond_dead_built_in_calls = NULL;
- FOR_EACH_BB (bb)
+ auto_vec<gcall *> cond_dead_built_in_calls;
+ FOR_EACH_BB_FN (bb, fun)
{
+ /* Skip blocks that are being optimized for size, since our
+ transformation always increases code size. */
+ if (optimize_bb_for_size_p (bb))
+ continue;
+
/* Collect dead call candidates. */
for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
{
- gimple stmt = gsi_stmt (i);
- if (is_gimple_call (stmt)
- && is_call_dce_candidate (stmt))
+ gcall *stmt = dyn_cast <gcall *> (gsi_stmt (i));
+ if (stmt
+ && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
+ && (gimple_call_lhs (stmt)
+ ? can_use_internal_fn (stmt)
+ : can_test_argument_range (stmt)))
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
fprintf (dump_file, "\n");
}
- if (cond_dead_built_in_calls == NULL)
- cond_dead_built_in_calls = VEC_alloc (gimple, heap, 64);
- VEC_safe_push (gimple, heap, cond_dead_built_in_calls, stmt);
+ if (!cond_dead_built_in_calls.exists ())
+ cond_dead_built_in_calls.create (64);
+ cond_dead_built_in_calls.safe_push (stmt);
}
}
}
- if (cond_dead_built_in_calls == NULL)
+ if (!cond_dead_built_in_calls.exists ())
return 0;
something_changed
= shrink_wrap_conditional_dead_built_in_calls (cond_dead_built_in_calls);
- VEC_free (gimple, heap, cond_dead_built_in_calls);
-
if (something_changed)
{
- free_dominance_info (CDI_DOMINATORS);
free_dominance_info (CDI_POST_DOMINATORS);
/* As we introduced new control-flow we need to insert PHI-nodes
for the call-clobbers of the remaining call. */
- mark_virtual_operands_for_renaming (cfun);
- return (TODO_update_ssa | TODO_cleanup_cfg | TODO_ggc_collect
- | TODO_remove_unused_locals);
+ mark_virtual_operands_for_renaming (fun);
+ return TODO_update_ssa;
}
- else
- return 0;
-}
-static bool
-gate_call_cdce (void)
-{
- /* The limit constants used in the implementation
- assume IEEE floating point format. Other formats
- can be supported in the future if needed. */
- return flag_tree_builtin_call_dce != 0 && optimize_function_for_speed_p (cfun);
+ return 0;
}
-struct gimple_opt_pass pass_call_cdce =
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_call_cdce (gcc::context *ctxt)
{
- {
- GIMPLE_PASS,
- "cdce", /* name */
- gate_call_cdce, /* gate */
- tree_call_cdce, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_TREE_CALL_CDCE, /* tv_id */
- PROP_cfg | PROP_ssa, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_verify_ssa /* todo_flags_finish */
- }
-};
+ return new pass_call_cdce (ctxt);
+}