/* Functions to determine/estimate number of iterations of a loop.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of GCC.
*/
-/* Returns true if ARG is either NULL_TREE or constant zero. Unlike
- integer_zerop, it does not care about overflow flags. */
-
-bool
-zero_p (tree arg)
-{
- if (!arg)
- return true;
-
- if (TREE_CODE (arg) != INTEGER_CST)
- return false;
-
- return (TREE_INT_CST_LOW (arg) == 0 && TREE_INT_CST_HIGH (arg) == 0);
-}
-
-/* Returns true if ARG a nonzero constant. Unlike integer_nonzerop, it does
- not care about overflow flags. */
-
-static bool
-nonzero_p (tree arg)
-{
- if (!arg)
- return false;
-
- if (TREE_CODE (arg) != INTEGER_CST)
- return false;
-
- return (TREE_INT_CST_LOW (arg) != 0 || TREE_INT_CST_HIGH (arg) != 0);
-}
-
/* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
static tree
assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
assumption = fold_build2 (EQ_EXPR, boolean_type_node,
assumption, build_int_cst (niter_type, 0));
- if (!nonzero_p (assumption))
+ if (!integer_nonzerop (assumption))
niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
niter->assumptions, assumption);
}
if (TREE_CODE (mod) != INTEGER_CST)
return false;
- if (nonzero_p (mod))
+ if (integer_nonzerop (mod))
mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
tmod = fold_convert (type, mod);
- if (nonzero_p (iv0->step))
+ if (integer_nonzerop (iv0->step))
{
/* The final value of the iv is iv1->base + MOD, assuming that this
computation does not overflow, and that
iv0->base <= iv1->base + MOD. */
- if (!iv1->no_overflow && !zero_p (mod))
+ if (!iv1->no_overflow && !integer_zerop (mod))
{
bound = fold_build2 (MINUS_EXPR, type,
TYPE_MAX_VALUE (type), tmod);
assumption = fold_build2 (LE_EXPR, boolean_type_node,
iv1->base, bound);
- if (zero_p (assumption))
+ if (integer_zerop (assumption))
return false;
}
noloop = fold_build2 (GT_EXPR, boolean_type_node,
/* The final value of the iv is iv0->base - MOD, assuming that this
computation does not overflow, and that
iv0->base - MOD <= iv1->base. */
- if (!iv0->no_overflow && !zero_p (mod))
+ if (!iv0->no_overflow && !integer_zerop (mod))
{
bound = fold_build2 (PLUS_EXPR, type,
TYPE_MIN_VALUE (type), tmod);
assumption = fold_build2 (GE_EXPR, boolean_type_node,
iv0->base, bound);
- if (zero_p (assumption))
+ if (integer_zerop (assumption))
return false;
}
noloop = fold_build2 (GT_EXPR, boolean_type_node,
iv1->base);
}
- if (!nonzero_p (assumption))
+ if (!integer_nonzerop (assumption))
niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
niter->assumptions,
assumption);
- if (!zero_p (noloop))
+ if (!integer_zerop (noloop))
niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
niter->may_be_zero,
noloop);
tree bound, d, assumption, diff;
tree niter_type = TREE_TYPE (step);
- if (nonzero_p (iv0->step))
+ if (integer_nonzerop (iv0->step))
{
/* for (i = iv0->base; i < iv1->base; i += iv0->step) */
if (iv0->no_overflow)
iv0->base, bound);
}
- if (zero_p (assumption))
+ if (integer_zerop (assumption))
return false;
- if (!nonzero_p (assumption))
+ if (!integer_nonzerop (assumption))
niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
niter->assumptions, assumption);
tree assumption = boolean_true_node, bound, diff;
tree mbz, mbzl, mbzr;
- if (nonzero_p (iv0->step))
+ if (integer_nonzerop (iv0->step))
{
diff = fold_build2 (MINUS_EXPR, type,
iv0->step, build_int_cst (type, 1));
mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
- if (!nonzero_p (assumption))
+ if (!integer_nonzerop (assumption))
niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
niter->assumptions, assumption);
- if (!zero_p (mbz))
+ if (!integer_zerop (mbz))
niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
niter->may_be_zero, mbz);
}
tree niter_type = unsigned_type_for (type);
tree delta, step, s;
- if (nonzero_p (iv0->step))
+ if (integer_nonzerop (iv0->step))
{
niter->control = *iv0;
niter->cmp = LT_EXPR;
fold_convert (niter_type, iv0->base));
/* First handle the special case that the step is +-1. */
- if ((iv0->step && integer_onep (iv0->step)
- && zero_p (iv1->step))
- || (iv1->step && integer_all_onesp (iv1->step)
- && zero_p (iv0->step)))
+ if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
+ || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
{
/* for (i = iv0->base; i < iv1->base; i++)
return true;
}
- if (nonzero_p (iv0->step))
+ if (integer_nonzerop (iv0->step))
step = fold_convert (niter_type, iv0->step);
else
step = fold_convert (niter_type,
if (!never_infinite)
{
- if (nonzero_p (iv0->step))
+ if (integer_nonzerop (iv0->step))
assumption = fold_build2 (NE_EXPR, boolean_type_node,
iv1->base, TYPE_MAX_VALUE (type));
else
assumption = fold_build2 (NE_EXPR, boolean_type_node,
iv0->base, TYPE_MIN_VALUE (type));
- if (zero_p (assumption))
+ if (integer_zerop (assumption))
return false;
- if (!nonzero_p (assumption))
+ if (!integer_nonzerop (assumption))
niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
niter->assumptions, assumption);
}
- if (nonzero_p (iv0->step))
+ if (integer_nonzerop (iv0->step))
iv1->base = fold_build2 (PLUS_EXPR, type,
iv1->base, build_int_cst (type, 1));
else
/* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
the control variable is on lhs. */
if (code == GE_EXPR || code == GT_EXPR
- || (code == NE_EXPR && zero_p (iv0->step)))
+ || (code == NE_EXPR && integer_zerop (iv0->step)))
{
SWAP (iv0, iv1);
code = swap_tree_comparison (code);
/* If the control induction variable does not overflow, the loop obviously
cannot be infinite. */
- if (!zero_p (iv0->step) && iv0->no_overflow)
+ if (!integer_zerop (iv0->step) && iv0->no_overflow)
never_infinite = true;
- else if (!zero_p (iv1->step) && iv1->no_overflow)
+ else if (!integer_zerop (iv1->step) && iv1->no_overflow)
never_infinite = true;
else
never_infinite = false;
/* We can handle the case when neither of the sides of the comparison is
invariant, provided that the test is NE_EXPR. This rarely occurs in
practice, but it is simple enough to manage. */
- if (!zero_p (iv0->step) && !zero_p (iv1->step))
+ if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
{
if (code != NE_EXPR)
return false;
iv0->step = fold_binary_to_constant (MINUS_EXPR, type,
iv0->step, iv1->step);
iv0->no_overflow = false;
- iv1->step = NULL_TREE;
+ iv1->step = build_int_cst (type, 0);
iv1->no_overflow = true;
}
/* If the result of the comparison is a constant, the loop is weird. More
precise handling would be possible, but the situation is not common enough
to waste time on it. */
- if (zero_p (iv0->step) && zero_p (iv1->step))
+ if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
return false;
/* Ignore loops of while (i-- < 10) type. */
if (iv0->step && tree_int_cst_sign_bit (iv0->step))
return false;
- if (!zero_p (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
+ if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
return false;
}
/* If the loop exits immediately, there is nothing to do. */
- if (zero_p (fold_build2 (code, boolean_type_node, iv0->base, iv1->base)))
+ if (integer_zerop (fold_build2 (code, boolean_type_node, iv0->base, iv1->base)))
{
niter->niter = build_int_cst (unsigned_type_for (type), 0);
return true;
switch (code)
{
case NE_EXPR:
- gcc_assert (zero_p (iv1->step));
+ gcc_assert (integer_zerop (iv1->step));
return number_of_iterations_ne (type, iv0, iv1->base, niter, never_infinite);
case LT_EXPR:
return number_of_iterations_lt (type, iv0, iv1, niter, never_infinite);
|| operand_equal_p (expr, old, 0))
return unshare_expr (new);
- if (!EXPR_P (expr))
+ if (!EXPR_P (expr) && !GIMPLE_STMT_P (expr))
return expr;
- n = TREE_CODE_LENGTH (TREE_CODE (expr));
+ n = TREE_OPERAND_LENGTH (expr);
for (i = 0; i < n; i++)
{
e = TREE_OPERAND (expr, i);
code = TREE_CODE (expr);
if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
{
- n = TREE_CODE_LENGTH (code);
+ n = TREE_OPERAND_LENGTH (expr);
for (i = 0; i < n; i++)
{
e = TREE_OPERAND (expr, i);
TREE_OPERAND (ret, i) = ee;
}
- return (ret ? fold (ret) : expr);
+ if (!ret)
+ return expr;
+
+ fold_defer_overflow_warnings ();
+ ret = fold (ret);
+ fold_undefer_and_ignore_overflow_warnings ();
+ return ret;
}
if (TREE_CODE (expr) != SSA_NAME)
return expr;
stmt = SSA_NAME_DEF_STMT (expr);
- if (TREE_CODE (stmt) != MODIFY_EXPR)
+ if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
return expr;
- e = TREE_OPERAND (stmt, 1);
+ e = GIMPLE_STMT_OPERAND (stmt, 1);
if (/* Casts are simple. */
TREE_CODE (e) != NOP_EXPR
&& TREE_CODE (e) != CONVERT_EXPR
/* We know that e0 == e1. Check whether we cannot simplify expr
using this fact. */
e = simplify_replace_tree (expr, e0, e1);
- if (zero_p (e) || nonzero_p (e))
+ if (integer_zerop (e) || integer_nonzerop (e))
return e;
e = simplify_replace_tree (expr, e1, e0);
- if (zero_p (e) || nonzero_p (e))
+ if (integer_zerop (e) || integer_nonzerop (e))
return e;
}
if (TREE_CODE (expr) == EQ_EXPR)
/* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
e = simplify_replace_tree (cond, e0, e1);
- if (zero_p (e))
+ if (integer_zerop (e))
return e;
e = simplify_replace_tree (cond, e1, e0);
- if (zero_p (e))
+ if (integer_zerop (e))
return e;
}
if (TREE_CODE (expr) == NE_EXPR)
/* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
e = simplify_replace_tree (cond, e0, e1);
- if (zero_p (e))
+ if (integer_zerop (e))
return boolean_true_node;
e = simplify_replace_tree (cond, e1, e0);
- if (zero_p (e))
+ if (integer_zerop (e))
return boolean_true_node;
}
/* Check whether COND ==> EXPR. */
notcond = invert_truthvalue (cond);
e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
- if (nonzero_p (e))
+ if (e && integer_nonzerop (e))
return e;
/* Check whether COND ==> not EXPR. */
e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
- if (e && zero_p (e))
+ if (e && integer_zerop (e))
return e;
return expr;
unsigned i;
tree call;
- if (exit != loop->single_exit)
+ if (exit != single_exit (loop))
return false;
body = get_loop_body (loop);
if (!simple_iv (loop, stmt, op1, &iv1, false))
return false;
+ /* We don't want to see undefined signed overflow warnings while
+ computing the number of iterations. */
+ fold_defer_overflow_warnings ();
+
iv0.base = expand_simple_operations (iv0.base);
iv1.base = expand_simple_operations (iv1.base);
if (!number_of_iterations_cond (type, &iv0, code, &iv1, niter,
loop_only_exit_p (loop, exit)))
- return false;
+ {
+ fold_undefer_and_ignore_overflow_warnings ();
+ return false;
+ }
if (optimize >= 3)
{
niter->may_be_zero,
&niter->additional_info);
+ fold_undefer_and_ignore_overflow_warnings ();
+
if (integer_onep (niter->assumptions))
return true;
/* We can provide a more specific warning if one of the operator is
constant and the other advances by +1 or -1. */
- if (!zero_p (iv1.step)
- ? (zero_p (iv0.step)
+ if (!integer_zerop (iv1.step)
+ ? (integer_zerop (iv0.step)
&& (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
- : (iv0.step
- && (integer_onep (iv0.step) || integer_all_onesp (iv0.step))))
+ : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
wording =
flag_unsafe_loop_optimizations
? N_("assuming that the loop is not infinite")
tree
find_loop_niter (struct loop *loop, edge *exit)
{
- unsigned n_exits, i;
- edge *exits = get_loop_exit_edges (loop, &n_exits);
+ unsigned i;
+ VEC (edge, heap) *exits = get_loop_exit_edges (loop);
edge ex;
tree niter = NULL_TREE, aniter;
struct tree_niter_desc desc;
*exit = NULL;
- for (i = 0; i < n_exits; i++)
+ for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
{
- ex = exits[i];
if (!just_once_each_iteration_p (loop, ex->src))
continue;
if (!number_of_iterations_exit (loop, ex, &desc, false))
continue;
- if (nonzero_p (desc.may_be_zero))
+ if (integer_nonzerop (desc.may_be_zero))
{
/* We exit in the first iteration through this exit.
We won't find anything better. */
break;
}
- if (!zero_p (desc.may_be_zero))
+ if (!integer_zerop (desc.may_be_zero))
continue;
aniter = desc.niter;
continue;
}
}
- free (exits);
+ VEC_free (edge, heap, exits);
return niter ? niter : chrec_dont_know;
}
return NULL_TREE;
}
- if (TREE_CODE (stmt) != MODIFY_EXPR)
+ if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
return NULL_TREE;
if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
nx = USE_FROM_PTR (op);
val = get_val_for (nx, base);
SET_USE (op, val);
- val = fold (TREE_OPERAND (stmt, 1));
+ val = fold (GIMPLE_STMT_OPERAND (stmt, 1));
SET_USE (op, nx);
/* only iterate loop once. */
return val;
}
/* Should never reach here. */
- gcc_unreachable();
+ gcc_unreachable ();
}
/* Tries to count the number of iterations of LOOP till it exits by EXIT
}
}
+ /* Don't issue signed overflow warnings. */
+ fold_defer_overflow_warnings ();
+
for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
{
for (j = 0; j < 2; j++)
aval[j] = get_val_for (op[j], val[j]);
acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
- if (acnd && zero_p (acnd))
+ if (acnd && integer_zerop (acnd))
{
+ fold_undefer_and_ignore_overflow_warnings ();
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
"Proved that loop %d iterates %d times using brute force.\n",
{
val[j] = get_val_for (next[j], val[j]);
if (!is_gimple_min_invariant (val[j]))
- return chrec_dont_know;
+ {
+ fold_undefer_and_ignore_overflow_warnings ();
+ return chrec_dont_know;
+ }
}
}
+ fold_undefer_and_ignore_overflow_warnings ();
+
return chrec_dont_know;
}
tree
find_loop_niter_by_eval (struct loop *loop, edge *exit)
{
- unsigned n_exits, i;
- edge *exits = get_loop_exit_edges (loop, &n_exits);
+ unsigned i;
+ VEC (edge, heap) *exits = get_loop_exit_edges (loop);
edge ex;
tree niter = NULL_TREE, aniter;
*exit = NULL;
- for (i = 0; i < n_exits; i++)
+ for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
{
- ex = exits[i];
if (!just_once_each_iteration_p (loop, ex->src))
continue;
niter = aniter;
*exit = ex;
}
- free (exits);
+ VEC_free (edge, heap, exits);
return niter ? niter : chrec_dont_know;
}
if (tree_expr_nonnegative_p (val))
return true;
- if (nonzero_p (cond))
+ if (integer_nonzerop (cond))
return false;
compare = fold_build2 (GE_EXPR,
boolean_type_node, val, build_int_cst (type, 0));
compare = tree_simplify_using_condition_1 (cond, compare);
- return nonzero_p (compare);
+ return integer_nonzerop (compare);
}
/* Returns true if we can prove that COND ==> A >= B. */
{
tree compare = fold_build2 (GE_EXPR, boolean_type_node, a, b);
- if (nonzero_p (compare))
+ if (integer_nonzerop (compare))
return true;
- if (nonzero_p (cond))
+ if (integer_nonzerop (cond))
return false;
compare = tree_simplify_using_condition_1 (cond, compare);
- return nonzero_p (compare);
+ return integer_nonzerop (compare);
}
/* Returns a constant upper bound on the value of expression VAL. VAL
tree type = TREE_TYPE (val);
tree op0, op1, subtype, maxt;
double_int bnd, max, mmax, cst;
+ tree stmt;
if (INTEGRAL_TYPE_P (type))
maxt = TYPE_MAX_VALUE (type);
bnd = derive_constant_upper_bound (op0, additional);
return double_int_udiv (bnd, tree_to_double_int (op1), FLOOR_DIV_EXPR);
+ case BIT_AND_EXPR:
+ op1 = TREE_OPERAND (val, 1);
+ if (TREE_CODE (op1) != INTEGER_CST
+ || tree_int_cst_sign_bit (op1))
+ return max;
+ return tree_to_double_int (op1);
+
+ case SSA_NAME:
+ stmt = SSA_NAME_DEF_STMT (val);
+ if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
+ || GIMPLE_STMT_OPERAND (stmt, 0) != val)
+ return max;
+ return derive_constant_upper_bound (GIMPLE_STMT_OPERAND (stmt, 1),
+ additional);
+
default:
return max;
}
}
-/* Records that AT_STMT is executed at most BOUND times in LOOP. The
- additional condition ADDITIONAL is recorded with the bound. */
+/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. The
+ additional condition ADDITIONAL is recorded with the bound. IS_EXIT
+ is true if the loop is exited immediately after STMT, and this exit
+ is taken at the latest when the STMT is executed BOUND + 1 times.
+ REALISTIC is true if the estimate comes from a reliable source
+ (number of iterations analysis, or size of data accessed in the loop). */
-void
-record_estimate (struct loop *loop, tree bound, tree additional, tree at_stmt)
+static void
+record_estimate (struct loop *loop, tree bound, tree additional, tree at_stmt,
+ bool is_exit, bool realistic)
{
struct nb_iter_bound *elt = xmalloc (sizeof (struct nb_iter_bound));
double_int i_bound = derive_constant_upper_bound (bound, additional);
- tree c_bound = double_int_to_tree (unsigned_type_for (TREE_TYPE (bound)),
- i_bound);
if (dump_file && (dump_flags & TDF_DETAILS))
{
- fprintf (dump_file, "Statements after ");
+ fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
print_generic_expr (dump_file, at_stmt, TDF_SLIM);
- fprintf (dump_file, " are executed at most ");
+ fprintf (dump_file, " is executed at most ");
print_generic_expr (dump_file, bound, TDF_SLIM);
fprintf (dump_file, " (bounded by ");
- print_generic_expr (dump_file, c_bound, TDF_SLIM);
- fprintf (dump_file, ") times in loop %d.\n", loop->num);
+ dump_double_int (dump_file, i_bound, true);
+ fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
}
- elt->bound = c_bound;
- elt->at_stmt = at_stmt;
+ elt->bound = i_bound;
+ elt->stmt = at_stmt;
+ elt->is_exit = is_exit;
+ elt->realistic = realistic && TREE_CODE (bound) == INTEGER_CST;
elt->next = loop->bounds;
loop->bounds = elt;
}
+/* Record the estimate on number of iterations of LOOP based on the fact that
+ the induction variable BASE + STEP * i evaluated in STMT does not wrap and
+ its values belong to the range <LOW, HIGH>. DATA_SIZE_BOUNDS_P is true if
+ LOW and HIGH are derived from the size of data. */
+
+static void
+record_nonwrapping_iv (struct loop *loop, tree base, tree step, tree stmt,
+ tree low, tree high, bool data_size_bounds_p)
+{
+ tree niter_bound, extreme, delta;
+ tree type = TREE_TYPE (base), unsigned_type;
+
+ if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
+ return;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Induction variable (");
+ print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
+ fprintf (dump_file, ") ");
+ print_generic_expr (dump_file, base, TDF_SLIM);
+ fprintf (dump_file, " + ");
+ print_generic_expr (dump_file, step, TDF_SLIM);
+ fprintf (dump_file, " * iteration does not wrap in statement ");
+ print_generic_expr (dump_file, stmt, TDF_SLIM);
+ fprintf (dump_file, " in loop %d.\n", loop->num);
+ }
+
+ unsigned_type = unsigned_type_for (type);
+ base = fold_convert (unsigned_type, base);
+ step = fold_convert (unsigned_type, step);
+
+ if (tree_int_cst_sign_bit (step))
+ {
+ extreme = fold_convert (unsigned_type, low);
+ if (TREE_CODE (base) != INTEGER_CST)
+ base = fold_convert (unsigned_type, high);
+ delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
+ step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
+ }
+ else
+ {
+ extreme = fold_convert (unsigned_type, high);
+ if (TREE_CODE (base) != INTEGER_CST)
+ base = fold_convert (unsigned_type, low);
+ delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
+ }
+
+ /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
+ would get out of the range. */
+ niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
+ record_estimate (loop, niter_bound, boolean_true_node, stmt,
+ false, data_size_bounds_p);
+}
+
/* Initialize LOOP->ESTIMATED_NB_ITERATIONS with the lowest safe
approximation of the number of iterations for LOOP. */
compute_estimated_nb_iterations (struct loop *loop)
{
struct nb_iter_bound *bound;
-
+
+ gcc_assert (loop->estimate_state == EST_NOT_AVAILABLE);
+
for (bound = loop->bounds; bound; bound = bound->next)
{
- if (TREE_CODE (bound->bound) != INTEGER_CST)
+ if (!bound->realistic)
continue;
/* Update only when there is no previous estimation, or when the current
estimation is smaller. */
- if (chrec_contains_undetermined (loop->estimated_nb_iterations)
- || tree_int_cst_lt (bound->bound, loop->estimated_nb_iterations))
- loop->estimated_nb_iterations = bound->bound;
+ if (loop->estimate_state == EST_NOT_AVAILABLE
+ || double_int_ucmp (bound->bound, loop->estimated_nb_iterations) < 0)
+ {
+ loop->estimate_state = EST_AVAILABLE;
+ loop->estimated_nb_iterations = bound->bound;
+ }
+ }
+}
+
+/* Determine information about number of iterations of a LOOP from the index
+ IDX of a data reference accessed in STMT. Callback for for_each_index. */
+
+struct ilb_data
+{
+ struct loop *loop;
+ tree stmt;
+};
+
+static bool
+idx_infer_loop_bounds (tree base, tree *idx, void *dta)
+{
+ struct ilb_data *data = dta;
+ tree ev, init, step;
+ tree low, high, type, next;
+ bool sign;
+ struct loop *loop = data->loop;
+
+ if (TREE_CODE (base) != ARRAY_REF)
+ return true;
+
+ ev = instantiate_parameters (loop, analyze_scalar_evolution (loop, *idx));
+ init = initial_condition (ev);
+ step = evolution_part_in_loop_num (ev, loop->num);
+
+ if (!init
+ || !step
+ || TREE_CODE (step) != INTEGER_CST
+ || integer_zerop (step)
+ || tree_contains_chrecs (init, NULL)
+ || chrec_contains_symbols_defined_in_loop (init, loop->num))
+ return true;
+
+ low = array_ref_low_bound (base);
+ high = array_ref_up_bound (base);
+
+ /* The case of nonconstant bounds could be handled, but it would be
+ complicated. */
+ if (TREE_CODE (low) != INTEGER_CST
+ || !high
+ || TREE_CODE (high) != INTEGER_CST)
+ return true;
+ sign = tree_int_cst_sign_bit (step);
+ type = TREE_TYPE (step);
+
+ /* In case the relevant bound of the array does not fit in type, or
+ it does, but bound + step (in type) still belongs into the range of the
+ array, the index may wrap and still stay within the range of the array
+ (consider e.g. if the array is indexed by the full range of
+ unsigned char).
+
+ To make things simpler, we require both bounds to fit into type, although
+ there are cases where this would not be strictly necessary. */
+ if (!int_fits_type_p (high, type)
+ || !int_fits_type_p (low, type))
+ return true;
+ low = fold_convert (type, low);
+ high = fold_convert (type, high);
+
+ if (sign)
+ next = fold_binary (PLUS_EXPR, type, low, step);
+ else
+ next = fold_binary (PLUS_EXPR, type, high, step);
+
+ if (tree_int_cst_compare (low, next) <= 0
+ && tree_int_cst_compare (next, high) <= 0)
+ return true;
+
+ record_nonwrapping_iv (loop, init, step, data->stmt, low, high, true);
+ return true;
+}
+
+/* Determine information about number of iterations of a LOOP from the bounds
+ of arrays in the data reference REF accessed in STMT. */
+
+static void
+infer_loop_bounds_from_ref (struct loop *loop, tree stmt, tree ref)
+{
+ struct ilb_data data;
+
+ data.loop = loop;
+ data.stmt = stmt;
+ for_each_index (&ref, idx_infer_loop_bounds, &data);
+}
+
+/* Determine information about number of iterations of a LOOP from the way
+ arrays are used in STMT. */
+
+static void
+infer_loop_bounds_from_array (struct loop *loop, tree stmt)
+{
+ tree call;
+
+ if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
+ {
+ tree op0 = GIMPLE_STMT_OPERAND (stmt, 0);
+ tree op1 = GIMPLE_STMT_OPERAND (stmt, 1);
+
+ /* For each memory access, analyze its access function
+ and record a bound on the loop iteration domain. */
+ if (REFERENCE_CLASS_P (op0))
+ infer_loop_bounds_from_ref (loop, stmt, op0);
+
+ if (REFERENCE_CLASS_P (op1))
+ infer_loop_bounds_from_ref (loop, stmt, op1);
}
+
+
+ call = get_call_expr_in (stmt);
+ if (call)
+ {
+ tree arg;
+ call_expr_arg_iterator iter;
+
+ FOR_EACH_CALL_EXPR_ARG (arg, iter, call)
+ if (REFERENCE_CLASS_P (arg))
+ infer_loop_bounds_from_ref (loop, stmt, arg);
+ }
+}
+
+/* Determine information about number of iterations of a LOOP from the fact
+ that signed arithmetic in STMT does not overflow. */
+
+static void
+infer_loop_bounds_from_signedness (struct loop *loop, tree stmt)
+{
+ tree def, base, step, scev, type, low, high;
+
+ if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
+ return;
+
+ def = GIMPLE_STMT_OPERAND (stmt, 0);
+
+ if (TREE_CODE (def) != SSA_NAME)
+ return;
+
+ type = TREE_TYPE (def);
+ if (!INTEGRAL_TYPE_P (type)
+ || !TYPE_OVERFLOW_UNDEFINED (type))
+ return;
+
+ scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
+ if (chrec_contains_undetermined (scev))
+ return;
+
+ base = initial_condition_in_loop_num (scev, loop->num);
+ step = evolution_part_in_loop_num (scev, loop->num);
+
+ if (!base || !step
+ || TREE_CODE (step) != INTEGER_CST
+ || tree_contains_chrecs (base, NULL)
+ || chrec_contains_symbols_defined_in_loop (base, loop->num))
+ return;
+
+ low = lower_bound_in_type (type, type);
+ high = upper_bound_in_type (type, type);
+
+ record_nonwrapping_iv (loop, base, step, stmt, low, high, false);
}
/* The following analyzers are extracting informations on the bounds
infer_loop_bounds_from_undefined (struct loop *loop)
{
unsigned i;
- basic_block bb, *bbs;
+ basic_block *bbs;
block_stmt_iterator bsi;
+ basic_block bb;
bbs = get_loop_body (loop);
{
bb = bbs[i];
+ /* If BB is not executed in each iteration of the loop, we cannot
+ use it to infer any information about # of iterations of the loop. */
+ if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
+ continue;
+
for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
- {
+ {
tree stmt = bsi_stmt (bsi);
- switch (TREE_CODE (stmt))
- {
- case MODIFY_EXPR:
- {
- tree op0 = TREE_OPERAND (stmt, 0);
- tree op1 = TREE_OPERAND (stmt, 1);
-
- /* For each array access, analyze its access function
- and record a bound on the loop iteration domain. */
- if (TREE_CODE (op1) == ARRAY_REF
- && !array_ref_contains_indirect_ref (op1))
- estimate_iters_using_array (stmt, op1);
-
- if (TREE_CODE (op0) == ARRAY_REF
- && !array_ref_contains_indirect_ref (op0))
- estimate_iters_using_array (stmt, op0);
-
- /* For each signed type variable in LOOP, analyze its
- scalar evolution and record a bound of the loop
- based on the type's ranges. */
- else if (!flag_wrapv && TREE_CODE (op0) == SSA_NAME)
- {
- tree init, step, diff, estimation;
- tree scev = instantiate_parameters
- (loop, analyze_scalar_evolution (loop, op0));
- tree type = chrec_type (scev);
-
- if (chrec_contains_undetermined (scev)
- || TYPE_UNSIGNED (type))
- break;
-
- init = initial_condition_in_loop_num (scev, loop->num);
- step = evolution_part_in_loop_num (scev, loop->num);
-
- if (init == NULL_TREE
- || step == NULL_TREE
- || TREE_CODE (init) != INTEGER_CST
- || TREE_CODE (step) != INTEGER_CST
- || TYPE_MIN_VALUE (type) == NULL_TREE
- || TYPE_MAX_VALUE (type) == NULL_TREE)
- break;
-
- if (integer_nonzerop (step))
- {
- tree utype;
-
- if (tree_int_cst_lt (step, integer_zero_node))
- diff = fold_build2 (MINUS_EXPR, type, init,
- TYPE_MIN_VALUE (type));
- else
- diff = fold_build2 (MINUS_EXPR, type,
- TYPE_MAX_VALUE (type), init);
-
- utype = unsigned_type_for (type);
- estimation = fold_build2 (CEIL_DIV_EXPR, type, diff,
- step);
- record_estimate (loop,
- fold_convert (utype, estimation),
- boolean_true_node, stmt);
- }
- }
-
- break;
- }
-
- case CALL_EXPR:
- {
- tree args;
-
- for (args = TREE_OPERAND (stmt, 1); args;
- args = TREE_CHAIN (args))
- if (TREE_CODE (TREE_VALUE (args)) == ARRAY_REF
- && !array_ref_contains_indirect_ref (TREE_VALUE (args)))
- estimate_iters_using_array (stmt, TREE_VALUE (args));
-
- break;
- }
-
- default:
- break;
- }
- }
+ infer_loop_bounds_from_array (loop, stmt);
+ infer_loop_bounds_from_signedness (loop, stmt);
+ }
+
}
- compute_estimated_nb_iterations (loop);
free (bbs);
}
static void
estimate_numbers_of_iterations_loop (struct loop *loop)
{
- edge *exits;
+ VEC (edge, heap) *exits;
tree niter, type;
- unsigned i, n_exits;
+ unsigned i;
struct tree_niter_desc niter_desc;
+ edge ex;
/* Give up if we already have tried to compute an estimation. */
- if (loop->estimated_nb_iterations == chrec_dont_know
- /* Or when we already have an estimation. */
- || (loop->estimated_nb_iterations != NULL_TREE
- && TREE_CODE (loop->estimated_nb_iterations) == INTEGER_CST))
+ if (loop->estimate_state != EST_NOT_COMPUTED)
return;
- else
- loop->estimated_nb_iterations = chrec_dont_know;
+ loop->estimate_state = EST_NOT_AVAILABLE;
- exits = get_loop_exit_edges (loop, &n_exits);
- for (i = 0; i < n_exits; i++)
+ exits = get_loop_exit_edges (loop);
+ for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
{
- if (!number_of_iterations_exit (loop, exits[i], &niter_desc, false))
+ if (!number_of_iterations_exit (loop, ex, &niter_desc, false))
continue;
niter = niter_desc.niter;
type = TREE_TYPE (niter);
- if (!zero_p (niter_desc.may_be_zero)
- && !nonzero_p (niter_desc.may_be_zero))
+ if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
build_int_cst (type, 0),
niter);
record_estimate (loop, niter,
niter_desc.additional_info,
- last_stmt (exits[i]->src));
+ last_stmt (ex->src),
+ true, true);
}
- free (exits);
+ VEC_free (edge, heap, exits);
- if (chrec_contains_undetermined (loop->estimated_nb_iterations))
- infer_loop_bounds_from_undefined (loop);
+ infer_loop_bounds_from_undefined (loop);
+ compute_estimated_nb_iterations (loop);
}
-/* Records estimates on numbers of iterations of LOOPS. */
+/* Records estimates on numbers of iterations of loops. */
void
-estimate_numbers_of_iterations (struct loops *loops)
+estimate_numbers_of_iterations (void)
{
- unsigned i;
+ loop_iterator li;
struct loop *loop;
- for (i = 1; i < loops->num; i++)
+ /* We don't want to issue signed overflow warnings while getting
+ loop iteration estimates. */
+ fold_defer_overflow_warnings ();
+
+ FOR_EACH_LOOP (li, loop, 0)
{
- loop = loops->parray[i];
- if (loop)
- estimate_numbers_of_iterations_loop (loop);
+ estimate_numbers_of_iterations_loop (loop);
}
+
+ fold_undefer_and_ignore_overflow_warnings ();
}
/* Returns true if statement S1 dominates statement S2. */
}
/* Returns true when we can prove that the number of executions of
- STMT in the loop is at most NITER, according to the fact
- that the statement NITER_BOUND->at_stmt is executed at most
- NITER_BOUND->bound times. */
+ STMT in the loop is at most NITER, according to the bound on
+ the number of executions of the statement NITER_BOUND->stmt recorded in
+ NITER_BOUND. If STMT is NULL, we must prove this bound for all
+ statements in the loop. */
static bool
n_of_executions_at_most (tree stmt,
struct nb_iter_bound *niter_bound,
tree niter)
{
- tree cond;
- tree bound = niter_bound->bound;
- tree bound_type = TREE_TYPE (bound);
- tree nit_type = TREE_TYPE (niter);
+ double_int bound = niter_bound->bound;
+ tree nit_type = TREE_TYPE (niter), e;
enum tree_code cmp;
- gcc_assert (TYPE_UNSIGNED (bound_type)
- && TYPE_UNSIGNED (nit_type)
- && is_gimple_min_invariant (bound));
- if (TYPE_PRECISION (nit_type) > TYPE_PRECISION (bound_type))
- bound = fold_convert (nit_type, bound);
- else
- niter = fold_convert (bound_type, niter);
-
- /* After the statement niter_bound->at_stmt we know that anything is
- executed at most BOUND times. */
- if (stmt && stmt_dominates_stmt_p (niter_bound->at_stmt, stmt))
- cmp = GE_EXPR;
- /* Before the statement niter_bound->at_stmt we know that anything
- is executed at most BOUND + 1 times. */
+ gcc_assert (TYPE_UNSIGNED (nit_type));
+
+ /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
+ the number of iterations is small. */
+ if (!double_int_fits_to_tree_p (nit_type, bound))
+ return false;
+
+ /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
+ times. This means that:
+
+ -- if NITER_BOUND->is_exit is true, then everything before
+ NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
+ times, and everything after it at most NITER_BOUND->bound times.
+
+ -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
+ is executed, then NITER_BOUND->stmt is executed as well in the same
+ iteration (we conclude that if both statements belong to the same
+ basic block, or if STMT is after NITER_BOUND->stmt), then STMT
+ is executed at most NITER_BOUND->bound + 1 times. Otherwise STMT is
+ executed at most NITER_BOUND->bound + 2 times. */
+
+ if (niter_bound->is_exit)
+ {
+ if (stmt
+ && stmt != niter_bound->stmt
+ && stmt_dominates_stmt_p (niter_bound->stmt, stmt))
+ cmp = GE_EXPR;
+ else
+ cmp = GT_EXPR;
+ }
else
- cmp = GT_EXPR;
+ {
+ if (!stmt
+ || (bb_for_stmt (stmt) != bb_for_stmt (niter_bound->stmt)
+ && !stmt_dominates_stmt_p (niter_bound->stmt, stmt)))
+ {
+ bound = double_int_add (bound, double_int_one);
+ if (double_int_zero_p (bound)
+ || !double_int_fits_to_tree_p (nit_type, bound))
+ return false;
+ }
+ cmp = GT_EXPR;
+ }
- cond = fold_binary (cmp, boolean_type_node, niter, bound);
- return nonzero_p (cond);
+ e = fold_binary (cmp, boolean_type_node,
+ niter, double_int_to_tree (nit_type, bound));
+ return e && integer_nonzerop (e);
}
/* Returns true if the arithmetics in TYPE can be assumed not to wrap. */
bool
nowrap_type_p (tree type)
{
- if (!flag_wrapv
- && INTEGRAL_TYPE_P (type)
- && !TYPE_UNSIGNED (type))
+ if (INTEGRAL_TYPE_P (type)
+ && TYPE_OVERFLOW_UNDEFINED (type))
return true;
if (POINTER_TYPE_P (type))
bool
scev_probably_wraps_p (tree base, tree step,
tree at_stmt, struct loop *loop,
- bool use_oveflow_semantics)
+ bool use_overflow_semantics)
{
struct nb_iter_bound *bound;
tree delta, step_abs;
|| TREE_CODE (step) != INTEGER_CST)
return true;
- if (zero_p (step))
+ if (integer_zerop (step))
return false;
/* If we can use the fact that signed and pointer arithmetics does not
wrap, we are done. */
- if (use_oveflow_semantics && nowrap_type_p (type))
+ if (use_overflow_semantics && nowrap_type_p (type))
return false;
+ /* Don't issue signed overflow warnings. */
+ fold_defer_overflow_warnings ();
+
/* Otherwise, compute the number of iterations before we reach the
bound of the type, and verify that the loop is exited before this
occurs. */
estimate_numbers_of_iterations_loop (loop);
for (bound = loop->bounds; bound; bound = bound->next)
- if (n_of_executions_at_most (at_stmt, bound, valid_niter))
- return false;
+ {
+ if (n_of_executions_at_most (at_stmt, bound, valid_niter))
+ {
+ fold_undefer_and_ignore_overflow_warnings ();
+ return false;
+ }
+ }
+
+ fold_undefer_and_ignore_overflow_warnings ();
/* At this point we still don't have a proof that the iv does not
overflow: give up. */
struct nb_iter_bound *bound, *next;
loop->nb_iterations = NULL;
- loop->estimated_nb_iterations = NULL;
+ loop->estimate_state = EST_NOT_COMPUTED;
for (bound = loop->bounds; bound; bound = next)
{
next = bound->next;
loop->bounds = NULL;
}
-/* Frees the information on upper bounds on numbers of iterations of LOOPS. */
+/* Frees the information on upper bounds on numbers of iterations of loops. */
void
-free_numbers_of_iterations_estimates (struct loops *loops)
+free_numbers_of_iterations_estimates (void)
{
- unsigned i;
+ loop_iterator li;
struct loop *loop;
- for (i = 1; i < loops->num; i++)
+ FOR_EACH_LOOP (li, loop, 0)
{
- loop = loops->parray[i];
- if (loop)
- free_numbers_of_iterations_estimates_loop (loop);
+ free_numbers_of_iterations_estimates_loop (loop);
}
}
substitute_in_loop_info (struct loop *loop, tree name, tree val)
{
loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
- loop->estimated_nb_iterations
- = simplify_replace_tree (loop->estimated_nb_iterations, name, val);
}