/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-codes.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "case-cfn-macros.h"
#include "alloc-pool.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "vr-values.h"
#include "wide-int-range.h"
/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */

/* Return true if the SSA name NAME is live on the edge E.  */
live_on_edge (edge e, tree name)
  return (live[e->dest->index]
          && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */

  /* Basic block where the assertion would be inserted.  */

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */

  /* Expression to compare.  */

  /* Next node in the linked list.  */
/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

vec<edge> to_remove_edges;
vec<switch_update> to_update_switch_stmts;
/* Return the maximum value for TYPE.  */
vrp_val_max (const_tree type)
  if (!INTEGRAL_TYPE_P (type))
  return TYPE_MAX_VALUE (type);

/* Return the minimum value for TYPE.  */
vrp_val_min (const_tree type)
  if (!INTEGRAL_TYPE_P (type))
  return TYPE_MIN_VALUE (type);
/* Return whether VAL is equal to the maximum value of its type.
   We can't do a simple equality comparison with TYPE_MAX_VALUE because
   C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
   is not == to the integer constant with the same value in the type.  */
vrp_val_is_max (const_tree val)
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
          || (type_max != NULL_TREE
              && operand_equal_p (val, type_max, 0)));

/* Return whether VAL is equal to the minimum value of its type.  */
vrp_val_is_min (const_tree val)
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
          || (type_min != NULL_TREE
              && operand_equal_p (val, type_min, 0)));
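
/* Illustrative example (editorial sketch, not from the original source):
   given an Ada subtype with range 1 .. 10, TYPE_MAX_VALUE may be an
   INTEGER_CST node distinct from a constant 10 built elsewhere in the IL,
   so the pointer test VAL == TYPE_MAX fails while
   operand_equal_p (VAL, TYPE_MAX, 0) still recognizes the value equality.
   That is why both checks appear above.  */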
/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */

enum value_range_type
intersect_range_with_nonzero_bits (enum value_range_type vr_type,
                                   wide_int *min, wide_int *max,
                                   const wide_int &nonzero_bits,
  if (vr_type == VR_ANTI_RANGE)
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
         A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
         to create an inclusive upper bound for A and an inclusive lower
         bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
         and A_MAX is the highest value that satisfies NONZERO_BITS.
         Likewise if the calculation of B_MIN wrapped, B is effectively
         empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)

      /* If exactly one of A or B is empty, return a VR_RANGE for the
         other one.  */
      if (a_empty || b_empty)
          gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Update the VR_ANTI_RANGE bounds.  */
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
         satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
          unsigned int precision = min->get_precision ();
          *min = wi::min_value (precision, sgn);
          *max = wi::max_value (precision, sgn);

  if (vr_type == VR_RANGE)
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
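
/* Worked example (editorial illustration, not from the original source):
   with SGN == UNSIGNED and NONZERO_BITS == 0b0101, the only values with
   no bits outside the mask are {0, 1, 4, 5}.  A VR_RANGE [1, 7] is
   therefore tightened to [1, 5]:
     *min = wi::round_up_for_mask (1, 0b0101)   == 1
     *max = wi::round_down_for_mask (7, 0b0101) == 5  */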
/* Set value range VR to VR_UNDEFINED.  */
set_value_range_to_undefined (value_range *vr)
  vr->type = VR_UNDEFINED;
  vr->min = vr->max = NULL_TREE;
  bitmap_clear (vr->equiv);

/* Set value range VR to VR_VARYING.  */
set_value_range_to_varying (value_range *vr)
  vr->type = VR_VARYING;
  vr->min = vr->max = NULL_TREE;
  bitmap_clear (vr->equiv);
/* Set value range VR to {T, MIN, MAX, EQUIV}.  */
set_value_range (value_range *vr, enum value_range_type t, tree min,
                 tree max, bitmap equiv)
  /* Check the validity of the range.  */
      && (t == VR_RANGE || t == VR_ANTI_RANGE))
      gcc_assert (min && max);
      gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max));

      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
        gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);

      && (t == VR_UNDEFINED || t == VR_VARYING))
      gcc_assert (min == NULL_TREE && max == NULL_TREE);
      gcc_assert (equiv == NULL || bitmap_empty_p (equiv));

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (vr->equiv == NULL
    vr->equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != vr->equiv)
      if (equiv && !bitmap_empty_p (equiv))
        bitmap_copy (vr->equiv, equiv);
        bitmap_clear (vr->equiv);
/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */
set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
                                  tree min, tree max, bitmap equiv)
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
      set_value_range_to_undefined (vr);
  else if (t == VR_VARYING)
      set_value_range_to_varying (vr);

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
      set_value_range (vr, t, min, max, equiv);

  /* Wrong order for min and max; we need to swap them and adjust
     the VR type.  */
  if (tree_int_cst_lt (max, min))
      /* For one-bit precision, if max < min the swapped range covers
         all values, so for VR_RANGE it is varying and for VR_ANTI_RANGE
         it is an empty range; drop to varying in both cases.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
          set_value_range_to_varying (vr);

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);

      /* There's one corner case: if we had [C+1, C] before, we now have
         that again.  But this represents an empty value range, so drop
         to varying in this case.  */
      if (tree_int_cst_lt (max, min))
          set_value_range_to_varying (vr);

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
      /* For -fstrict-enums we may receive out-of-range ranges so consider
         values < -INF and values > INF as -INF/INF as well.  */
      tree type = TREE_TYPE (min);
      bool is_min = (INTEGRAL_TYPE_P (type)
                     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
      bool is_max = (INTEGRAL_TYPE_P (type)
                     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);

      if (is_min && is_max)
          /* We cannot deal with empty ranges, drop to varying.
             ??? This could be VR_UNDEFINED instead.  */
          set_value_range_to_varying (vr);
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
               && (is_min || is_max))
          /* Non-empty boolean ranges can always be represented
             as a singleton range.  */
            min = max = vrp_val_max (TREE_TYPE (min));
            min = max = vrp_val_min (TREE_TYPE (min));
               /* As a special exception preserve non-null ranges.  */
               && !(TYPE_UNSIGNED (TREE_TYPE (min))
                    && integer_zerop (max)))
          tree one = build_int_cst (TREE_TYPE (max), 1);
          min = int_const_binop (PLUS_EXPR, max, one);
          max = vrp_val_max (TREE_TYPE (max));
          tree one = build_int_cst (TREE_TYPE (min), 1);
          max = int_const_binop (MINUS_EXPR, min, one);
          min = vrp_val_min (TREE_TYPE (min));

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */
  set_value_range (vr, t, min, max, equiv);
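
/* Worked example (editorial illustration, not from the original source):
   for a 32-bit int, a requested VR_RANGE [10, 5] has max < min and is
   canonicalized to the wrapping complement ~[6, 9]; conversely, a
   requested VR_ANTI_RANGE ~[INT_MIN, 5] is representable as the plain
   range [6, INT_MAX] and is canonicalized to that VR_RANGE.  */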
/* Copy value range FROM into value range TO.  */
copy_value_range (value_range *to, const value_range *from)
  set_value_range (to, from->type, from->min, from->max, from->equiv);

/* Set value range VR to a single value.  This function is only called
   with values we get from statements, and exists to clear the
   TREE_OVERFLOW flag.  */
set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
  gcc_assert (is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set_value_range (vr, VR_RANGE, val, val, equiv);

/* Set value range VR to a non-NULL range of type TYPE.  */
set_value_range_to_nonnull (value_range *vr, tree type)
  tree zero = build_int_cst (type, 0);
  set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);

/* Set value range VR to a NULL range of type TYPE.  */
set_value_range_to_null (value_range *vr, tree type)
  set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
/* Return true if VAL1 and VAL2 are equal values for VRP purposes.  */
vrp_operand_equal_p (const_tree val1, const_tree val2)
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))

/* Return true if the bitmaps B1 and B2 are equal.  */
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
      || ((!b1 || bitmap_empty_p (b1))
          && (!b2 || bitmap_empty_p (b2)))
          && bitmap_equal_p (b1, b2)));
/* Return true if VR is [0, 0].  */
range_is_null (const value_range *vr)
  return vr->type == VR_RANGE
         && integer_zerop (vr->min)
         && integer_zerop (vr->max);

/* Return true if max and min of VR are INTEGER_CST.  It's not necessarily
   a singleton.  */
range_int_cst_p (const value_range *vr)
  return (vr->type == VR_RANGE
          && TREE_CODE (vr->max) == INTEGER_CST
          && TREE_CODE (vr->min) == INTEGER_CST);

/* Return true if VR is an INTEGER_CST singleton.  */
range_int_cst_singleton_p (const value_range *vr)
  return (range_int_cst_p (vr)
          && tree_int_cst_equal (vr->min, vr->max));

/* Return true if value range VR involves at least one symbol.  */
symbolic_range_p (const value_range *vr)
  return (!is_gimple_min_invariant (vr->min)
          || !is_gimple_min_invariant (vr->max));
/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */
get_single_symbol (tree t, bool *neg, tree *inv)
  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
          neg_ = (TREE_CODE (t) == MINUS_EXPR);
          inv_ = TREE_OPERAND (t, 0);
          t = TREE_OPERAND (t, 1);
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
          inv_ = TREE_OPERAND (t, 1);
          t = TREE_OPERAND (t, 0);

  if (TREE_CODE (t) == NEGATE_EXPR)
      t = TREE_OPERAND (t, 0);

  if (TREE_CODE (t) != SSA_NAME)

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);
/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
  const bool pointer_p = POINTER_TYPE_P (type);
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
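
/* Usage sketch (editorial illustration, not from the original source):
   with TYPE == int, SYM == x_1 (a hypothetical SSA name), NEG == true and
   INV == 3, this builds the tree -x_1 + 3; with NEG == false and a zero
   INV it simply returns the symbol itself.  */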
/* Return
     1 if VAL < VAL2,
     0 if !(VAL < VAL2), and
    -2 if those are incomparable.  */
operand_less_p (tree val, tree val2)
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);

  fold_defer_overflow_warnings ();

  tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

  fold_undefer_and_ignore_overflow_warnings ();

      || TREE_CODE (tcmp) != INTEGER_CST)

  if (!integer_zerop (tcmp))
/* Compare two values VAL1 and VAL2.  Return

        -2 if VAL1 and VAL2 cannot be compared at compile-time,
        -1 if VAL1 < VAL2,
         0 if VAL1 == VAL2,
        +1 if VAL1 > VAL2, and
        +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
              == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));

  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)

      /* [-]NAME + CST == [-]NAME + CST.  */

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!inv1 || !TREE_NO_WARNING (val1))
          && (!inv2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

        inv1 = build_int_cst (TREE_TYPE (val1), 0);
        inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
                      TYPE_SIGN (TREE_TYPE (val1)));
  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
      if (!overflow_undefined)

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!sym1 || !TREE_NO_WARNING (val1))
          && (!sym2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
         underflows, this means that we can trivially compare the NAME with
         it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
          != wi::cmp (diff, wi::to_wide (cst), sgn))
          const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
          return cst1 ? res : -res;

  /* We cannot say anything more for non-constants.  */

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))

      if (TREE_CODE (val1) == INTEGER_CST
          && TREE_CODE (val2) == INTEGER_CST)
        return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
          if (known_eq (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
          if (known_lt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
          if (known_gt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))

  /* First see if VAL1 and VAL2 are not the same.  */
  if (val1 == val2 || operand_equal_p (val1, val2, 0))

  /* If VAL1 is a lower address than VAL2, return -1.  */
  if (operand_less_p (val1, val2) == 1)

  /* If VAL1 is a higher address than VAL2, return +1.  */
  if (operand_less_p (val2, val1) == 1)

  /* If VAL1 is different than VAL2, return +2.
     For integer constants we either have already returned -1 or 1
     or they are equivalent.  We still might succeed in proving
     something about non-trivial operands.  */
  if (TREE_CODE (val1) != INTEGER_CST
      || TREE_CODE (val2) != INTEGER_CST)
      t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
      if (t && integer_onep (t))
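
/* Worked example (editorial illustration, not from the original source):
   for signed int with undefined overflow, comparing x_1 + 1 against
   x_1 + 3 hits the '[-]NAME + CST' case above: both use x_1 with the same
   sign, so the result is wi::cmp (1, 3, SIGNED) == -1, i.e. VAL1 < VAL2,
   and *STRICT_OVERFLOW_P may be set because the conclusion relies on
   x_1 + CST not overflowing.  */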
/* Compare values like compare_values_warnv.  */
compare_values (tree val1, tree val2)
  return compare_values_warnv (val1, val2, &sop);

/* Return  1 if VAL is inside value range MIN <= VAL <= MAX,
           0 if VAL is not inside [MIN, MAX],
          -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */
value_inside_range (tree val, tree min, tree max)
  cmp1 = operand_less_p (val, min);

  cmp2 = operand_less_p (max, val);
/* Return true if value ranges VR0 and VR1 have a non-empty
   intersection.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */
value_ranges_intersect_p (const value_range *vr0, const value_range *vr1)
  /* The value ranges do not intersect if the maximum of the first range is
     less than the minimum of the second range or vice versa.
     When those relations are unknown, we can't do any better.  */
  if (operand_less_p (vr0->max, vr1->min) != 0)
  if (operand_less_p (vr1->max, vr0->min) != 0)

/* Return TRUE if *VR includes the value zero.  */
range_includes_zero_p (const value_range *vr)
  if (vr->type == VR_VARYING)

  /* Ughh, we don't know.  We choose not to optimize.  */
  if (vr->type == VR_UNDEFINED)

  tree zero = build_int_cst (TREE_TYPE (vr->min), 0);
  if (vr->type == VR_ANTI_RANGE)
      int res = value_inside_range (zero, vr->min, vr->max);
      return res == 0 || res == -2;
  return value_inside_range (zero, vr->min, vr->max) != 0;
/* Return true if *VR is known to only contain nonnegative values.  */
value_range_nonnegative_p (const value_range *vr)
  /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
     which would return a useful value should be encoded as a
     range.  */
  if (vr->type == VR_RANGE)
      int result = compare_values (vr->min, integer_zero_node);
      return (result == 0 || result == 1);

/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.  */
value_range_constant_singleton (const value_range *vr)
  if (vr->type == VR_RANGE
      && vrp_operand_equal_p (vr->min, vr->max)
      && is_gimple_min_invariant (vr->min))
/* Value range wrapper for wide_int_range_set_zero_nonzero_bits.

   Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR.

   Return TRUE if VR was a constant range and we were able to compute
   the bit masks.  */
vrp_set_zero_nonzero_bits (const tree expr_type,
                           const value_range *vr,
                           wide_int *may_be_nonzero,
                           wide_int *must_be_nonzero)
  if (!range_int_cst_p (vr))
      *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
      *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
  wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
                                        wi::to_wide (vr->min),
                                        wi::to_wide (vr->max),
                                        *may_be_nonzero, *must_be_nonzero);
/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */
ranges_from_anti_range (const value_range *ar,
                        value_range *vr0, value_range *vr1)
  tree type = TREE_TYPE (ar->min);

  vr0->type = VR_UNDEFINED;
  vr1->type = VR_UNDEFINED;

  /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
     [A+1, +INF].  Not sure if this helps in practice, though.  */
  if (ar->type != VR_ANTI_RANGE
      || TREE_CODE (ar->min) != INTEGER_CST
      || TREE_CODE (ar->max) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))

  if (!vrp_val_is_min (ar->min))
      vr0->type = VR_RANGE;
      vr0->min = vrp_val_min (type);
      vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1);
  if (!vrp_val_is_max (ar->max))
      vr1->type = VR_RANGE;
      vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1);
      vr1->max = vrp_val_max (type);
  if (vr0->type == VR_UNDEFINED)
      vr1->type = VR_UNDEFINED;

  return vr0->type != VR_UNDEFINED;
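
/* Worked example (editorial illustration, not from the original source):
   for a 32-bit int, splitting the anti-range ~[3, 5] yields
     *VR0 = [INT_MIN, 2]  and  *VR1 = [6, INT_MAX],
   while ~[INT_MIN, 5] needs only one range, so *VR0 becomes [6, INT_MAX]
   and *VR1 stays VR_UNDEFINED.  */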
/* Extract the components of a value range into a pair of wide ints in
   [WMIN, WMAX].

   If the value range is anything but a VR_*RANGE of constants, the
   resulting wide ints are set to [-MIN, +MAX] for the type.  */
extract_range_into_wide_ints (const value_range *vr,
                              signop sign, unsigned prec,
                              wide_int &wmin, wide_int &wmax)
  if ((vr->type == VR_RANGE
       || vr->type == VR_ANTI_RANGE)
      && TREE_CODE (vr->min) == INTEGER_CST
      && TREE_CODE (vr->max) == INTEGER_CST)
      wmin = wi::to_wide (vr->min);
      wmax = wi::to_wide (vr->max);

  wmin = wi::min_value (prec, sign);
  wmax = wi::max_value (prec, sign);
/* Value range wrapper for wide_int_range_multiplicative_op:

     *VR = *VR0 .CODE. *VR1.  */
extract_range_from_multiplicative_op (value_range *vr,
                                      enum tree_code code,
                                      const value_range *vr0,
                                      const value_range *vr1)
  gcc_assert (code == MULT_EXPR
              || code == TRUNC_DIV_EXPR
              || code == FLOOR_DIV_EXPR
              || code == CEIL_DIV_EXPR
              || code == EXACT_DIV_EXPR
              || code == ROUND_DIV_EXPR
              || code == RSHIFT_EXPR
              || code == LSHIFT_EXPR);
  gcc_assert (vr0->type == VR_RANGE && vr0->type == vr1->type);

  tree type = TREE_TYPE (vr0->min);
  wide_int res_lb, res_ub;
  wide_int vr0_lb = wi::to_wide (vr0->min);
  wide_int vr0_ub = wi::to_wide (vr0->max);
  wide_int vr1_lb = wi::to_wide (vr1->min);
  wide_int vr1_ub = wi::to_wide (vr1->max);
  bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type);
  bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
  unsigned prec = TYPE_PRECISION (type);

  if (wide_int_range_multiplicative_op (res_lb, res_ub,
                                        code, TYPE_SIGN (type), prec,
                                        vr0_lb, vr0_ub, vr1_lb, vr1_ub,
                                        overflow_undefined, overflow_wraps))
    set_and_canonicalize_value_range (vr, VR_RANGE,
                                      wide_int_to_tree (type, res_lb),
                                      wide_int_to_tree (type, res_ub), NULL);
    set_value_range_to_varying (vr);
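
/* Worked example (editorial illustration, not from the original source):
   for unsigned char, [2, 3] MULT_EXPR [10, 20] considers the bound
   products {20, 40, 30, 60}; nothing overflows the 8-bit precision, so
   the result is the VR_RANGE [20, 60].  */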
/* If BOUND will include a symbolic bound, adjust it accordingly,
   otherwise leave it as is.

   CODE is the original operation that combined the bounds (PLUS_EXPR
   or MINUS_EXPR).

   TYPE is the type of the original operation.

   SYM_OPn is the symbolic part of OPn, if it has one.

   NEG_OPn is TRUE if the OPn was negated.  */
adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
                       tree sym_op0, tree sym_op1,
                       bool neg_op0, bool neg_op1)
  bool minus_p = (code == MINUS_EXPR);
  /* If the result bound is constant, we're done; otherwise, build the
     symbolic lower bound.  */
  if (sym_op0 == sym_op1)
    bound = build_symbolic_expr (type, sym_op0,
      /* We may not negate if that might introduce
         undefined overflow.  */
          || TYPE_OVERFLOW_WRAPS (type))
        bound = build_symbolic_expr (type, sym_op1,
                                     neg_op1 ^ minus_p, bound);
/* Combine OP0 and OP1, which are two parts of a bound, into one wide
   int bound according to CODE.  CODE is the operation combining the
   bound (either a PLUS_EXPR or a MINUS_EXPR).

   TYPE is the type of the combine operation.

   WI is the wide int to store the result.

   OVF is -1 if an underflow occurred, +1 if an overflow occurred and 0
   if no over/underflow occurred.  */
combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
               tree type, tree op0, tree op1)
  bool minus_p = (code == MINUS_EXPR);
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* Combine the bounds, if any.  */
        wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
        wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
      wi = wi::to_wide (op0);
        wi = wi::neg (wi::to_wide (op1), &ovf);
        wi = wi::to_wide (op1);
    wi = wi::shwi (0, prec);
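
/* Worked example (editorial illustration, not from the original source):
   for unsigned char, combining OP0 == 10 and OP1 == 20 with MINUS_EXPR
   computes 10 - 20, which wraps in 8-bit arithmetic; WI becomes 246 and
   OVF records the underflow so that set_value_range_with_overflow can
   saturate the bound or switch the range kind.  */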
/* Given a range in [WMIN, WMAX], adjust it for possible overflow and
   put the result in VR.

   TYPE is the type of the range.

   MIN_OVF and MAX_OVF indicate what type of overflow, if any, occurred
   while originally calculating WMIN or WMAX.  wi::OVF_UNDERFLOW
   indicates underflow, wi::OVF_OVERFLOW indicates overflow and
   wi::OVF_NONE indicates neither.  */
set_value_range_with_overflow (value_range &vr,
                               const wide_int &wmin, const wide_int &wmax,
                               wi::overflow_type min_ovf,
                               wi::overflow_type max_ovf)
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  if (TYPE_OVERFLOW_WRAPS (type))
      /* If overflow wraps, truncate the values and adjust the
         range kind and bounds appropriately.  */
      wide_int tmin = wide_int::from (wmin, prec, sgn);
      wide_int tmax = wide_int::from (wmax, prec, sgn);
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
          /* No overflow or both overflow or underflow.  The
             range kind stays VR_RANGE.  */
          vr.min = wide_int_to_tree (type, tmin);
          vr.max = wide_int_to_tree (type, tmax);
      else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
               || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
          /* Min underflow or max overflow.  The range kind
             changes to VR_ANTI_RANGE.  */
          bool covers = false;
          wide_int tem = tmin;
          vr.type = VR_ANTI_RANGE;
          if (wi::cmp (tmin, tmax, sgn) < 0)
          if (wi::cmp (tmax, tem, sgn) > 0)
          /* If the anti-range would cover nothing, drop to varying.
             Likewise if the anti-range bounds are outside of the
             type's values.  */
          if (covers || wi::cmp (tmin, tmax, sgn) > 0)
              set_value_range_to_varying (&vr);
          vr.min = wide_int_to_tree (type, tmin);
          vr.max = wide_int_to_tree (type, tmax);
          /* Other underflow and/or overflow, drop to VR_VARYING.  */
          set_value_range_to_varying (&vr);
      /* If overflow does not wrap, saturate to the type's min/max
         value.  */
      wide_int type_min = wi::min_value (prec, sgn);
      wide_int type_max = wi::max_value (prec, sgn);
      if (min_ovf == wi::OVF_UNDERFLOW)
        vr.min = wide_int_to_tree (type, type_min);
      else if (min_ovf == wi::OVF_OVERFLOW)
        vr.min = wide_int_to_tree (type, type_max);
        vr.min = wide_int_to_tree (type, wmin);

      if (max_ovf == wi::OVF_UNDERFLOW)
        vr.max = wide_int_to_tree (type, type_min);
      else if (max_ovf == wi::OVF_OVERFLOW)
        vr.max = wide_int_to_tree (type, type_max);
        vr.max = wide_int_to_tree (type, wmax);
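
/* Worked example (editorial illustration, not from the original source):
   for unsigned char with wrapping overflow, WMIN == 200 and WMAX == 300
   with MAX_OVF == wi::OVF_OVERFLOW truncate to 200 and 44; only the max
   overflowed, so the result becomes the VR_ANTI_RANGE ~[45, 199], i.e.
   [200, 255] U [0, 44].  If overflow does not wrap, the bounds are
   saturated instead, giving [200, 255].  */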
/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */
extract_range_from_binary_expr_1 (value_range *vr,
                                  enum tree_code code, tree expr_type,
                                  const value_range *vr0_,
                                  const value_range *vr1_)
  signop sign = TYPE_SIGN (expr_type);
  unsigned int prec = TYPE_PRECISION (expr_type);
  value_range vr0 = *vr0_, vr1 = *vr1_;
  value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
  enum value_range_type type;
  tree min = NULL_TREE, max = NULL_TREE;

  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
      set_value_range_to_varying (vr);

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
      set_value_range_to_varying (vr);
  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
      set_value_range_to_undefined (vr);
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr0);
  else if (vr1.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr1);

  /* We get imprecise results from ranges_from_anti_range when
     code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
     range, but then we also need to hack up vrp_meet.  It's just
     easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
  if (code == EXACT_DIV_EXPR
      && vr0.type == VR_ANTI_RANGE
      && vr0.min == vr0.max
      && integer_zerop (vr0.min))
      set_value_range_to_nonnull (vr, expr_type);

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
      extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
      if (vrtem1.type != VR_UNDEFINED)
          value_range vrres = VR_INITIALIZER;
          extract_range_from_binary_expr_1 (&vrres, code, expr_type,
          vrp_meet (vr, &vrres);
  /* Likewise for X op ~[].  */
  if (vr1.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
      extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
      if (vrtem1.type != VR_UNDEFINED)
          value_range vrres = VR_INITIALIZER;
          extract_range_from_binary_expr_1 (&vrres, code, expr_type,
          vrp_meet (vr, &vrres);
  /* The type of the resulting value range defaults to VR0.TYPE.  */

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions, MIN/MAX and PLUS/MINUS.

     TODO, we may be able to derive anti-ranges in some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != RSHIFT_EXPR
      && code != POINTER_PLUS_EXPR
      && (vr0.type == VR_VARYING
          || vr1.type == VR_VARYING
          || vr0.type != vr1.type
          || symbolic_range_p (&vr0)
          || symbolic_range_p (&vr1)))
      set_value_range_to_varying (vr);

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
      if (code == MIN_EXPR || code == MAX_EXPR)
          /* For MIN/MAX expressions with pointers, we only care about
             nullness; if both are non null, then the result is nonnull.
             If both are null, then the result is null.  Otherwise they
             are varying.  */
          if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
            set_value_range_to_nonnull (vr, expr_type);
          else if (range_is_null (&vr0) && range_is_null (&vr1))
            set_value_range_to_null (vr, expr_type);
            set_value_range_to_varying (vr);
      else if (code == POINTER_PLUS_EXPR)
          /* For pointer types, we are really only interested in asserting
             whether the expression evaluates to non-NULL.  */
          if (!range_includes_zero_p (&vr0)
              || !range_includes_zero_p (&vr1))
            set_value_range_to_nonnull (vr, expr_type);
          else if (range_is_null (&vr0) && range_is_null (&vr1))
            set_value_range_to_null (vr, expr_type);
            set_value_range_to_varying (vr);
      else if (code == BIT_AND_EXPR)
          /* For pointer types, we are really only interested in asserting
             whether the expression evaluates to non-NULL.  */
          if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
            set_value_range_to_nonnull (vr, expr_type);
          else if (range_is_null (&vr0) || range_is_null (&vr1))
            set_value_range_to_null (vr, expr_type);
            set_value_range_to_varying (vr);
      set_value_range_to_varying (vr);
  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR)
      /* This will normalize things such that calculating
         [0,0] - VR_VARYING is not dropped to varying, but is
         calculated as [MIN+1, MAX].  */
      if (vr0.type == VR_VARYING)
          vr0.type = VR_RANGE;
          vr0.min = vrp_val_min (expr_type);
          vr0.max = vrp_val_max (expr_type);
      if (vr1.type == VR_VARYING)
          vr1.type = VR_RANGE;
          vr1.min = vrp_val_min (expr_type);
          vr1.max = vrp_val_max (expr_type);

      const bool minus_p = (code == MINUS_EXPR);
      tree min_op0 = vr0.min;
      tree min_op1 = minus_p ? vr1.max : vr1.min;
      tree max_op0 = vr0.max;
      tree max_op1 = minus_p ? vr1.min : vr1.max;
      tree sym_min_op0 = NULL_TREE;
      tree sym_min_op1 = NULL_TREE;
      tree sym_max_op0 = NULL_TREE;
      tree sym_max_op1 = NULL_TREE;
      bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;

      neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;

      /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
         single-symbolic ranges, try to compute the precise resulting range,
         but only if we know that this resulting range will also be constant
         or single-symbolic.  */
      if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
          && (TREE_CODE (min_op0) == INTEGER_CST
                = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
          && (TREE_CODE (min_op1) == INTEGER_CST
                = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
          && (!(sym_min_op0 && sym_min_op1)
              || (sym_min_op0 == sym_min_op1
                  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
          && (TREE_CODE (max_op0) == INTEGER_CST
                = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
          && (TREE_CODE (max_op1) == INTEGER_CST
                = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
          && (!(sym_max_op0 && sym_max_op1)
              || (sym_max_op0 == sym_max_op1
                  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
          wide_int wmin, wmax;
          wi::overflow_type min_ovf = wi::OVF_NONE;
          wi::overflow_type max_ovf = wi::OVF_NONE;

          /* Build the bounds.  */
          combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
          combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);

          /* If we have overflow for the constant part and the resulting
             range will be symbolic, drop to VR_VARYING.  */
          if (((bool)min_ovf && sym_min_op0 != sym_min_op1)
              || ((bool)max_ovf && sym_max_op0 != sym_max_op1))
              set_value_range_to_varying (vr);

          /* Adjust the range for possible overflow.  */
          set_value_range_with_overflow (*vr, expr_type,
                                         wmin, wmax, min_ovf, max_ovf);
          if (vr->type == VR_VARYING)

          /* Build the symbolic bounds if needed.  */
          adjust_symbolic_bound (vr->min, code, expr_type,
                                 sym_min_op0, sym_min_op1,
                                 neg_min_op0, neg_min_op1);
          adjust_symbolic_bound (vr->max, code, expr_type,
                                 sym_max_op0, sym_max_op1,
                                 neg_max_op0, neg_max_op1);
          /* ??? It would probably be cleaner to eliminate min/max/type
             entirely and hold these values in VR directly.  */

      /* For other cases, for example if we have a PLUS_EXPR with two
         VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
         to compute a precise range for such a case.
         ??? General even mixed range kind operations can be expressed
         by for example transforming ~[3, 5] + [1, 2] to range-only
         operations and a union primitive:
           [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
             = [-INF+1, 4]  U  [6, +INF(OVF)]
         though usually the union is not exactly representable with
         a single range or anti-range as the above is
         [-INF+1, +INF(OVF)] intersected with ~[5, 5]
         but one could use a scheme similar to equivalences for this.  */
      set_value_range_to_varying (vr);
  else if (code == MIN_EXPR
           || code == MAX_EXPR)
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max;
      wide_int vr1_min, vr1_max;
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
      if (wide_int_range_min_max (wmin, wmax, code, sign, prec,
                                  vr0_min, vr0_max, vr1_min, vr1_max))
        set_value_range (vr, VR_RANGE,
                         wide_int_to_tree (expr_type, wmin),
                         wide_int_to_tree (expr_type, wmax), NULL);
        set_value_range_to_varying (vr);
  else if (code == MULT_EXPR)
      if (!range_int_cst_p (&vr0)
          || !range_int_cst_p (&vr1))
          set_value_range_to_varying (vr);
      extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
  else if (code == RSHIFT_EXPR
           || code == LSHIFT_EXPR)
      if (range_int_cst_p (&vr1)
          && !wide_int_range_shift_undefined_p (prec,
                                                wi::to_wide (vr1.min),
                                                wi::to_wide (vr1.max)))
          if (code == RSHIFT_EXPR)
              /* Even if vr0 is VARYING or otherwise not usable, we can derive
                 useful ranges just from the shift count.  E.g.
                 x >> 63 for signed 64-bit x is always [-1, 0].  */
              if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
                  vr0.type = type = VR_RANGE;
                  vr0.min = vrp_val_min (expr_type);
                  vr0.max = vrp_val_max (expr_type);
              extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
          else if (code == LSHIFT_EXPR
                   && range_int_cst_p (&vr0))
              wide_int res_lb, res_ub;
              if (wide_int_range_lshift (res_lb, res_ub, sign, prec,
                                         wi::to_wide (vr0.min),
                                         wi::to_wide (vr0.max),
                                         wi::to_wide (vr1.min),
                                         wi::to_wide (vr1.max),
                                         TYPE_OVERFLOW_UNDEFINED (expr_type),
                                         TYPE_OVERFLOW_WRAPS (expr_type)))
                  min = wide_int_to_tree (expr_type, res_lb);
                  max = wide_int_to_tree (expr_type, res_ub);
                  set_and_canonicalize_value_range (vr, VR_RANGE,
      set_value_range_to_varying (vr);
  else if (code == TRUNC_DIV_EXPR
           || code == FLOOR_DIV_EXPR
           || code == CEIL_DIV_EXPR
           || code == EXACT_DIV_EXPR
           || code == ROUND_DIV_EXPR)
      wide_int dividend_min, dividend_max, divisor_min, divisor_max;
      wide_int wmin, wmax, extra_min, extra_max;

      /* Special case explicit division by zero as undefined.  */
      if (range_is_null (&vr1))
          set_value_range_to_undefined (vr);

      /* First, normalize ranges into constants we can handle.  Note
         that VR_ANTI_RANGE's of constants were already normalized
         before arriving here.

         NOTE: As a future improvement, we may be able to do better
         with mixed symbolic (anti-)ranges like [0, A].  See note in
         ranges_from_anti_range.  */
      extract_range_into_wide_ints (&vr0, sign, prec,
                                    dividend_min, dividend_max);
      extract_range_into_wide_ints (&vr1, sign, prec,
                                    divisor_min, divisor_max);
      if (!wide_int_range_div (wmin, wmax, code, sign, prec,
                               dividend_min, dividend_max,
                               divisor_min, divisor_max,
                               TYPE_OVERFLOW_UNDEFINED (expr_type),
                               TYPE_OVERFLOW_WRAPS (expr_type),
                               extra_range_p, extra_min, extra_max))
          set_value_range_to_varying (vr);
      set_value_range (vr, VR_RANGE,
                       wide_int_to_tree (expr_type, wmin),
                       wide_int_to_tree (expr_type, wmax), NULL);
          value_range extra_range = VR_INITIALIZER;
          set_value_range (&extra_range, VR_RANGE,
                           wide_int_to_tree (expr_type, extra_min),
                           wide_int_to_tree (expr_type, extra_max), NULL);
          vrp_meet (vr, &extra_range);
  else if (code == TRUNC_MOD_EXPR)
      if (range_is_null (&vr1))
          set_value_range_to_undefined (vr);
      wide_int wmin, wmax, tmp;
      wide_int vr0_min, vr0_max, vr1_min, vr1_max;
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
      wide_int_range_trunc_mod (wmin, wmax, sign, prec,
                                vr0_min, vr0_max, vr1_min, vr1_max);
      min = wide_int_to_tree (expr_type, wmin);
      max = wide_int_to_tree (expr_type, wmax);
      set_value_range (vr, VR_RANGE, min, max, NULL);
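
  /* Worked example (editorial illustration, not from the original source):
     for signed int, x % [1, 10] with x unconstrained yields [-9, 9]:
     TRUNC_MOD_EXPR takes the sign of the dividend and its magnitude is
     bounded by the largest divisor magnitude minus one.  */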
  else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
      wide_int may_be_nonzero0, may_be_nonzero1;
      wide_int must_be_nonzero0, must_be_nonzero1;
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max, vr1_min, vr1_max;
      vrp_set_zero_nonzero_bits (expr_type, &vr0,
                                 &may_be_nonzero0, &must_be_nonzero0);
      vrp_set_zero_nonzero_bits (expr_type, &vr1,
                                 &may_be_nonzero1, &must_be_nonzero1);
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
      if (code == BIT_AND_EXPR)
          if (wide_int_range_bit_and (wmin, wmax, sign, prec,
              min = wide_int_to_tree (expr_type, wmin);
              max = wide_int_to_tree (expr_type, wmax);
              set_value_range (vr, VR_RANGE, min, max, NULL);
            set_value_range_to_varying (vr);
      else if (code == BIT_IOR_EXPR)
          if (wide_int_range_bit_ior (wmin, wmax, sign,
              min = wide_int_to_tree (expr_type, wmin);
              max = wide_int_to_tree (expr_type, wmax);
              set_value_range (vr, VR_RANGE, min, max, NULL);
            set_value_range_to_varying (vr);
      else if (code == BIT_XOR_EXPR)
          if (wide_int_range_bit_xor (wmin, wmax, sign, prec,
              min = wide_int_to_tree (expr_type, wmin);
              max = wide_int_to_tree (expr_type, wmax);
              set_value_range (vr, VR_RANGE, min, max, NULL);
            set_value_range_to_varying (vr);
  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  */
  if (min == NULL_TREE
      || TREE_OVERFLOW_P (min)
      || TREE_OVERFLOW_P (max))
      set_value_range_to_varying (vr);

  /* We punt for [-INF, +INF].
     We learn nothing when we have INF on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF].  */
  if (vrp_val_is_min (min) && vrp_val_is_max (max))
      set_value_range_to_varying (vr);

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
      /* If the new range has its limits swapped around (MIN > MAX),
         then the operation caused one of them to wrap around, mark
         the new range VARYING.  */
      set_value_range_to_varying (vr);
  set_value_range (vr, type, min, max, NULL);
/* Extract range information from a unary operation CODE based on
   the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
   The resulting range is stored in *VR.  */
extract_range_from_unary_expr (value_range *vr,
                               enum tree_code code, tree type,
                               const value_range *vr0_, tree op0_type)
  signop sign = TYPE_SIGN (type);
  unsigned int prec = TYPE_PRECISION (type);
  value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;

  /* VRP only operates on integral and pointer types.  */
  if (!(INTEGRAL_TYPE_P (op0_type)
        || POINTER_TYPE_P (op0_type))
      || !(INTEGRAL_TYPE_P (type)
           || POINTER_TYPE_P (type)))
      set_value_range_to_varying (vr);

  /* If VR0 is UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED)
      set_value_range_to_undefined (vr);

  /* Handle operations that we express in terms of others.  */
  if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
      /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.  */
      copy_value_range (vr, &vr0);
  else if (code == NEGATE_EXPR)
      /* -X is simply 0 - X, so re-use existing code that also handles
         anti-ranges fine.  */
      value_range zero = VR_INITIALIZER;
      set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
  else if (code == BIT_NOT_EXPR)
      /* ~X is simply -1 - X, so re-use existing code that also handles
         anti-ranges fine.  */
      value_range minusone = VR_INITIALIZER;
      set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
                                        type, &minusone, &vr0);
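
  /* Worked example (editorial illustration, not from the original source):
     BIT_NOT_EXPR of [1, 5] in signed int is computed as -1 - [1, 5],
     giving [-6, -2]; the same rewrite lets the binary-expression code
     handle an anti-range operand such as ~[1, 5] without extra logic
     here.  */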
  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express op ~[] as (op []') U (op []'').  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
      extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
      if (vrtem1.type != VR_UNDEFINED)
          value_range vrres = VR_INITIALIZER;
          extract_range_from_unary_expr (&vrres, code, type,
          vrp_meet (vr, &vrres);

  if (CONVERT_EXPR_CODE_P (code))
      tree inner_type = op0_type;
      tree outer_type = type;

      /* If the expression evaluates to a pointer, we are only interested in
         determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
      if (POINTER_TYPE_P (type))
          if (!range_includes_zero_p (&vr0))
            set_value_range_to_nonnull (vr, type);
          else if (range_is_null (&vr0))
            set_value_range_to_null (vr, type);
            set_value_range_to_varying (vr);

      /* We normalize everything to a VR_RANGE, but for constant
         anti-ranges we must handle them by leaving the final result
         as an anti-range.  This allows us to convert things like
         ~[0,5] seamlessly.  */
      value_range_type vr_type = VR_RANGE;
      if (vr0.type == VR_ANTI_RANGE
          && TREE_CODE (vr0.min) == INTEGER_CST
          && TREE_CODE (vr0.max) == INTEGER_CST)
        vr_type = VR_ANTI_RANGE;

      /* NOTES: Previously we were returning VARYING for all symbolics, but
         we can do better by treating them as [-MIN, +MAX].  For
         example, converting [SYM, SYM] from INT to LONG UNSIGNED,
         we can return: ~[0x80000000, 0xffffffff7fffffff].

         We were also failing to convert ~[0,0] from char* to unsigned,
         instead choosing to return VR_VARYING.  Now we return ~[0,0].  */
      wide_int vr0_min, vr0_max, wmin, wmax;
      signop inner_sign = TYPE_SIGN (inner_type);
      signop outer_sign = TYPE_SIGN (outer_type);
      unsigned inner_prec = TYPE_PRECISION (inner_type);
      unsigned outer_prec = TYPE_PRECISION (outer_type);
      extract_range_into_wide_ints (&vr0, inner_sign, inner_prec,
      if (wide_int_range_convert (wmin, wmax,
                                  inner_sign, inner_prec,
                                  outer_sign, outer_prec,
          tree min = wide_int_to_tree (outer_type, wmin);
          tree max = wide_int_to_tree (outer_type, wmax);
          set_and_canonicalize_value_range (vr, vr_type, min, max, NULL);
      set_value_range_to_varying (vr);
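
  /* Illustrative example (editorial sketch, not from the original source):
     converting the int range [5, 10] to unsigned int keeps [5, 10], while
     a constant anti-range such as ~[0, 5] survives the conversion with
     VR_TYPE == VR_ANTI_RANGE, as described above, instead of being
     dropped to VR_VARYING.  */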
  else if (code == ABS_EXPR)
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max;
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max,
                              TYPE_OVERFLOW_UNDEFINED (type)))
        set_value_range (vr, VR_RANGE,
                         wide_int_to_tree (type, wmin),
                         wide_int_to_tree (type, wmax), NULL);
        set_value_range_to_varying (vr);

  /* For unhandled operations fall back to varying.  */
  set_value_range_to_varying (vr);
/* Debugging dumps.  */

void dump_value_range (FILE *, const value_range *);
void debug_value_range (const value_range *);
void dump_all_value_ranges (FILE *);
void dump_vr_equiv (FILE *, bitmap);
void debug_vr_equiv (bitmap);

/* Dump value range VR to FILE.  */
dump_value_range (FILE *file, const value_range *vr)
    fprintf (file, "[]");
  else if (vr->type == VR_UNDEFINED)
    fprintf (file, "UNDEFINED");
  else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
      tree type = TREE_TYPE (vr->min);

      fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");

      if (INTEGRAL_TYPE_P (type)
          && !TYPE_UNSIGNED (type)
          && vrp_val_is_min (vr->min))
        fprintf (file, "-INF");
        print_generic_expr (file, vr->min);

      fprintf (file, ", ");

      if (INTEGRAL_TYPE_P (type)
          && vrp_val_is_max (vr->max))
        fprintf (file, "+INF");
        print_generic_expr (file, vr->max);

      fprintf (file, "]");

          fprintf (file, " EQUIVALENCES: { ");

          EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
              print_generic_expr (file, ssa_name (i));
              fprintf (file, " ");

          fprintf (file, "} (%u elements)", c);
  else if (vr->type == VR_VARYING)
    fprintf (file, "VARYING");
    fprintf (file, "INVALID RANGE");

/* Dump value range VR to stderr.  */
debug_value_range (const value_range *vr)
  dump_value_range (stderr, vr);
  fprintf (stderr, "\n");

value_range::dump () const
  debug_value_range (this);
/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
   create a new SSA name N and return the assertion assignment
   'N = ASSERT_EXPR <V, V OP W>'.  */
build_assert_expr_for (tree cond, tree v)
  gcc_assert (TREE_CODE (v) == SSA_NAME
              && COMPARISON_CLASS_P (cond));

  a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
  assertion = gimple_build_assign (NULL_TREE, a);

  /* The new ASSERT_EXPR creates a new SSA name that replaces the
     operand of the ASSERT_EXPR.  Create it so the new name and the old one
     are registered in the replacement table so that we can fix the SSA web
     after adding all the ASSERT_EXPRs.  */
  tree new_def = create_new_def_for (v, assertion, NULL);
  /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
     given we have to be able to fully propagate those out to re-create
     valid SSA when removing the asserts.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
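
/* Worked example (editorial illustration, not from the original source):
   for COND 'x_1 > 10' and V == x_1, this builds the assignment

     x_5 = ASSERT_EXPR <x_1, x_1 > 10>;

   where x_5 is a hypothetical new SSA name; uses of x_1 dominated by the
   assertion can then be rewritten to x_5, whose range is [11, +INF].  */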
/* Return true if STMT is a conditional jump whose condition involves
   floating point values.  */
fp_predicate (gimple *stmt)
  GIMPLE_CHECK (stmt, GIMPLE_COND);
  return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));

/* If the range of values taken by OP can be inferred after STMT executes,
   return the comparison code (COMP_CODE_P) and value (VAL_P) that
   describes the inferred range.  Return true if a range could be
   inferred.  */
infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
  *comp_code_p = ERROR_MARK;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))

  /* If STMT is the last statement of a basic block with no normal
     successors, there is no point inferring anything about any of its
     operands.  We would not be able to find a proper insertion point
     for the assertion, anyway.  */
  if (stmt_ends_bb_p (stmt))
      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
        if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))

  if (infer_nonnull_range (stmt, op))
      *val_p = build_int_cst (TREE_TYPE (op), 0);
      *comp_code_p = NE_EXPR;
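
/* Worked example (editorial illustration, not from the original source):
   after a statement such as '*p_1 = 0' executes, p_1 must have been a
   valid pointer, so infer_nonnull_range succeeds and the inferred range
   is expressed as the predicate p_1 NE_EXPR 0.  */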
void dump_asserts_for (FILE *, tree);
void debug_asserts_for (tree);
void dump_all_asserts (FILE *);
void debug_all_asserts (void);

/* Dump all the registered assertions for NAME to FILE.  */
dump_asserts_for (FILE *file, tree name)
  fprintf (file, "Assertions to be inserted for ");
  print_generic_expr (file, name);
  fprintf (file, "\n");

  loc = asserts_for[SSA_NAME_VERSION (name)];
      fprintf (file, "\t");
      print_gimple_stmt (file, gsi_stmt (loc->si), 0);
      fprintf (file, "\n\tBB #%d", loc->bb->index);
          fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
                   loc->e->dest->index);
          dump_edge_info (file, loc->e, dump_flags, 0);
      fprintf (file, "\n\tPREDICATE: ");
      print_generic_expr (file, loc->expr);
      fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
      print_generic_expr (file, loc->val);
      fprintf (file, "\n\n");

  fprintf (file, "\n");
/* Dump all the registered assertions for NAME to stderr.  */
debug_asserts_for (tree name)
  dump_asserts_for (stderr, name);

/* Dump all the registered assertions for all the names to FILE.  */
dump_all_asserts (FILE *file)
  fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
  EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
    dump_asserts_for (file, ssa_name (i));
  fprintf (file, "\n");

/* Dump all the registered assertions for all the names to stderr.  */
debug_all_asserts (void)
  dump_all_asserts (stderr);

/* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */
add_assert_info (vec<assert_info> &asserts,
                 tree name, tree expr, enum tree_code comp_code, tree val)
  info.comp_code = comp_code;
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  asserts.safe_push (info);
2169 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2170 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2171 E->DEST, then register this location as a possible insertion point
2172 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2174 BB, E and SI provide the exact insertion point for the new
2175 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2176 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2177 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2178 must not be NULL. */
2181 register_new_assert_for (tree name, tree expr,
2182 enum tree_code comp_code,
2186 gimple_stmt_iterator si)
2188 assert_locus *n, *loc, *last_loc;
2189 basic_block dest_bb;
2191 gcc_checking_assert (bb == NULL || e == NULL);
2194 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2195 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2197 /* Never build an assert comparing against an integer constant with
2198 TREE_OVERFLOW set. This confuses our undefined overflow warning
2199 machinery. */
2200 if (TREE_OVERFLOW_P (val))
2201 val = drop_tree_overflow (val);
2203 /* The new assertion A will be inserted at BB or E. We need to
2204 determine if the new location is dominated by a previously
2205 registered location for A. If we are doing an edge insertion,
2206 assume that A will be inserted at E->DEST. Note that this is not
2207 necessarily true.
2209 If E is a critical edge, it will be split. But even if E is
2210 split, the new block will dominate the same set of blocks that
2211 E->DEST dominates.
2213 The reverse, however, is not true: blocks dominated by E->DEST
2214 will not be dominated by the new block created to split E. So,
2215 if the insertion location is on a critical edge, we will not use
2216 the new location to move another assertion previously registered
2217 at a block dominated by E->DEST. */
2218 dest_bb = (bb) ? bb : e->dest;
2220 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2221 VAL at a block dominating DEST_BB, then we don't need to insert a new
2222 one. Similarly, if the same assertion already exists at a block
2223 dominated by DEST_BB and the new location is not on a critical
2224 edge, then update the existing location for the assertion (i.e.,
2225 move the assertion up in the dominance tree).
2227 Note, this is implemented as a simple linked list because there
2228 should not be more than a handful of assertions registered per
2229 name. If this becomes a performance problem, a table hashed by
2230 COMP_CODE and VAL could be implemented. */
2231 loc = asserts_for[SSA_NAME_VERSION (name)];
2235 if (loc->comp_code == comp_code
2237 || operand_equal_p (loc->val, val, 0))
2238 && (loc->expr == expr
2239 || operand_equal_p (loc->expr, expr, 0)))
2241 /* If E is not a critical edge and DEST_BB
2242 dominates the existing location for the assertion, move
2243 the assertion up in the dominance tree by updating its
2244 location information. */
2245 if ((e == NULL || !EDGE_CRITICAL_P (e))
2246 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2255 /* Update the last node of the list and move to the next one. */
2260 /* If we didn't find an assertion already registered for
2261 NAME COMP_CODE VAL, add a new one at the end of the list of
2262 assertions associated with NAME. */
2263 n = XNEW (struct assert_locus);
2267 n->comp_code = comp_code;
2275 asserts_for[SSA_NAME_VERSION (name)] = n;
2277 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2280 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2281 Extract a suitable test code and value and store them into *CODE_P and
2282 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2284 If no extraction was possible, return FALSE, otherwise return TRUE.
2286 If INVERT is true, then we invert the result stored into *CODE_P. */
2289 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2290 tree cond_op0, tree cond_op1,
2291 bool invert, enum tree_code *code_p,
2294 enum tree_code comp_code;
2297 /* Otherwise, we have a comparison of the form NAME COMP VAL
2298 or VAL COMP NAME. */
2299 if (name == cond_op1)
2301 /* If the predicate is of the form VAL COMP NAME, flip
2302 COMP around because we need to register NAME as the
2303 first operand in the predicate. */
2304 comp_code = swap_tree_comparison (cond_code);
2307 else if (name == cond_op0)
2309 /* The comparison is of the form NAME COMP VAL, so the
2310 comparison code remains unchanged. */
2311 comp_code = cond_code;
2317 /* Invert the comparison code as necessary. */
2319 comp_code = invert_tree_comparison (comp_code, 0);
2321 /* VRP only handles integral and pointer types. */
2322 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2323 && ! POINTER_TYPE_P (TREE_TYPE (val)))
2326 /* Do not register always-false predicates.
2327 FIXME: this works around a limitation in fold() when dealing with
2328 enumerations. Given 'enum { N1, N2 } x;', fold will not
2329 fold 'if (x > N2)' to 'if (0)'. */
2330 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2331 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2333 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2334 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
2336 if (comp_code == GT_EXPR
2338 || compare_values (val, max) == 0))
2341 if (comp_code == LT_EXPR
2343 || compare_values (val, min) == 0))
2346 *code_p = comp_code;
2351 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
2352 (otherwise return VAL). VAL and MASK must be zero-extended for
2353 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
2354 (to transform signed values into unsigned) and at the end xor
2355 SGNBIT back. */
2358 masked_increment (const wide_int &val_in, const wide_int &mask,
2359 const wide_int &sgnbit, unsigned int prec)
2361 wide_int bit = wi::one (prec), res;
2364 wide_int val = val_in ^ sgnbit;
2365 for (i = 0; i < prec; i++, bit += bit)
2368 if ((res & bit) == 0)
2371 res = wi::bit_and_not (val + bit, res);
2373 if (wi::gtu_p (res, val))
2374 return res ^ sgnbit;
2376 return val ^ sgnbit;
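/* Worked example (illustrative values): with PREC == 4, SGNBIT == 0,
   VAL == 0b0101 and MASK == 0b1100, the values R satisfying
   (R & MASK) == R are {0b0000, 0b0100, 0b1000, 0b1100}; the smallest
   of them greater than VAL is 0b1000, which is what the loop above
   returns.  */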
2379 /* Helper for overflow_comparison_p
2381 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2382 OP1's defining statement to see if it ultimately has the form
2383 OP0 CODE (OP0 PLUS INTEGER_CST)
2385 If so, return TRUE indicating this is an overflow test and store into
2386 *NEW_CST an updated constant that can be used in a narrowed range test.
2388 REVERSED indicates if the comparison was originally:
2390 OP1 CODE' OP0.
2392 This affects how we build the updated constant. */
2395 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
2396 bool follow_assert_exprs, bool reversed, tree *new_cst)
2398 /* See if this is a relational operation between two SSA_NAMES with
2399 unsigned, overflow wrapping values. If so, check it more deeply. */
2400 if ((code == LT_EXPR || code == LE_EXPR
2401 || code == GE_EXPR || code == GT_EXPR)
2402 && TREE_CODE (op0) == SSA_NAME
2403 && TREE_CODE (op1) == SSA_NAME
2404 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
2405 && TYPE_UNSIGNED (TREE_TYPE (op0))
2406 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
2408 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
2410 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
2411 if (follow_assert_exprs)
2413 while (gimple_assign_single_p (op1_def)
2414 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
2416 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
2417 if (TREE_CODE (op1) != SSA_NAME)
2419 op1_def = SSA_NAME_DEF_STMT (op1);
2423 /* Now look at the defining statement of OP1 to see if it adds
2424 or subtracts a nonzero constant from another operand. */
2426 && is_gimple_assign (op1_def)
2427 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
2428 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
2429 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
2431 tree target = gimple_assign_rhs1 (op1_def);
2433 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
2434 for one where TARGET appears on the RHS. */
2435 if (follow_assert_exprs)
2437 /* Now see if that "other operand" is op0, following the chain
2438 of ASSERT_EXPRs if necessary. */
2439 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
2440 while (op0 != target
2441 && gimple_assign_single_p (op0_def)
2442 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
2444 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
2445 if (TREE_CODE (op0) != SSA_NAME)
2447 op0_def = SSA_NAME_DEF_STMT (op0);
2451 /* If we did not find our target SSA_NAME, then this is not
2452 an overflow test. */
2456 tree type = TREE_TYPE (op0);
2457 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
2458 tree inc = gimple_assign_rhs2 (op1_def);
2460 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
2462 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
2469 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2470 OP1's defining statement to see if it ultimately has the form
2471 OP0 CODE (OP0 PLUS INTEGER_CST)
2473 If so, return TRUE indicating this is an overflow test and store into
2474 *NEW_CST an updated constant that can be used in a narrowed range test.
2476 These statements are left as-is in the IL to facilitate discovery of
2477 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
2478 the alternate range representation is often useful within VRP. */
2481 overflow_comparison_p (tree_code code, tree name, tree val,
2482 bool use_equiv_p, tree *new_cst)
2484 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
2486 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
2487 use_equiv_p, true, new_cst);
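/* For illustration (made-up SSA names, unsigned wrapping type): given

     y_2 = x_1 + 10;
     if (x_1 > y_2)

   the comparison x_1 > x_1 + 10 holds iff the addition wrapped, i.e.
   iff x_1 > MAX - 10, so *NEW_CST is set to MAX - 10 and the branch
   can be modeled by that narrowed range test instead.  */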
2491 /* Try to register an edge assertion for SSA name NAME on edge E for
2492 the condition COND contributing to the conditional jump pointed to by BSI.
2493 Invert the condition COND if INVERT is true. */
2496 register_edge_assert_for_2 (tree name, edge e,
2497 enum tree_code cond_code,
2498 tree cond_op0, tree cond_op1, bool invert,
2499 vec<assert_info> &asserts)
2502 enum tree_code comp_code;
2504 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2507 invert, &comp_code, &val))
2510 /* Queue the assert. */
2512 if (overflow_comparison_p (comp_code, name, val, false, &x))
2514 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
2515 ? GT_EXPR : LE_EXPR);
2516 add_assert_info (asserts, name, name, new_code, x);
2518 add_assert_info (asserts, name, name, comp_code, val);
2520 /* In the case of NAME <= CST and NAME being defined as
2521 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
2522 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
2523 This catches range and anti-range tests. */
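/* For instance (illustrative values): from NAME <= 10 with
   NAME = (unsigned) NAME2 + 5 we can assert NAME2 >= -5 and
   NAME2 <= 5, i.e. a range test on NAME2.  */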
2524 if ((comp_code == LE_EXPR
2525 || comp_code == GT_EXPR)
2526 && TREE_CODE (val) == INTEGER_CST
2527 && TYPE_UNSIGNED (TREE_TYPE (val)))
2529 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2530 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
2532 /* Extract CST2 from the (optional) addition. */
2533 if (is_gimple_assign (def_stmt)
2534 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
2536 name2 = gimple_assign_rhs1 (def_stmt);
2537 cst2 = gimple_assign_rhs2 (def_stmt);
2538 if (TREE_CODE (name2) == SSA_NAME
2539 && TREE_CODE (cst2) == INTEGER_CST)
2540 def_stmt = SSA_NAME_DEF_STMT (name2);
2543 /* Extract NAME2 from the (optional) sign-changing cast. */
2544 if (gimple_assign_cast_p (def_stmt))
2546 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
2547 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2548 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
2549 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
2550 name3 = gimple_assign_rhs1 (def_stmt);
2553 /* If name3 is used later, create an ASSERT_EXPR for it. */
2554 if (name3 != NULL_TREE
2555 && TREE_CODE (name3) == SSA_NAME
2556 && (cst2 == NULL_TREE
2557 || TREE_CODE (cst2) == INTEGER_CST)
2558 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
2562 /* Build an expression for the range test. */
2563 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
2564 if (cst2 != NULL_TREE)
2565 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2569 fprintf (dump_file, "Adding assert for ");
2570 print_generic_expr (dump_file, name3);
2571 fprintf (dump_file, " from ");
2572 print_generic_expr (dump_file, tmp);
2573 fprintf (dump_file, "\n");
2576 add_assert_info (asserts, name3, tmp, comp_code, val);
2579 /* If name2 is used later, create an ASSERT_EXPR for it. */
2580 if (name2 != NULL_TREE
2581 && TREE_CODE (name2) == SSA_NAME
2582 && TREE_CODE (cst2) == INTEGER_CST
2583 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
2587 /* Build an expression for the range test. */
2589 if (TREE_TYPE (name) != TREE_TYPE (name2))
2590 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
2591 if (cst2 != NULL_TREE)
2592 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2596 fprintf (dump_file, "Adding assert for ");
2597 print_generic_expr (dump_file, name2);
2598 fprintf (dump_file, " from ");
2599 print_generic_expr (dump_file, tmp);
2600 fprintf (dump_file, "\n");
2603 add_assert_info (asserts, name2, tmp, comp_code, val);
2607 /* In the case of post-in/decrement tests like if (i++) ... and uses
2608 of the in/decremented value on the edge, the extra name we want to
2609 assert for is not on the def chain of the name compared. Instead
2610 it is in the set of use stmts.
2611 Similar cases happen for conversions that were simplified through
2612 fold_{sign_changed,widened}_comparison. */
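/* For instance (made-up SSA names): with i_5 = i_4 + 1 and a branch
   on i_4 == 9, on the true edge we can additionally assert i_5 == 10
   for later uses of the incremented value.  */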
2613 if ((comp_code == NE_EXPR
2614 || comp_code == EQ_EXPR)
2615 && TREE_CODE (val) == INTEGER_CST)
2617 imm_use_iterator ui;
2619 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
2621 if (!is_gimple_assign (use_stmt))
2624 /* Only consider use-stmts whose block dominates the predecessor E->SRC. */
2625 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
2628 tree name2 = gimple_assign_lhs (use_stmt);
2629 if (TREE_CODE (name2) != SSA_NAME)
2632 enum tree_code code = gimple_assign_rhs_code (use_stmt);
2634 if (code == PLUS_EXPR
2635 || code == MINUS_EXPR)
2637 cst = gimple_assign_rhs2 (use_stmt);
2638 if (TREE_CODE (cst) != INTEGER_CST)
2640 cst = int_const_binop (code, val, cst);
2642 else if (CONVERT_EXPR_CODE_P (code))
2644 /* For truncating conversions we cannot record
2645 inequalities. */
2646 if (comp_code == NE_EXPR
2647 && (TYPE_PRECISION (TREE_TYPE (name2))
2648 < TYPE_PRECISION (TREE_TYPE (name))))
2650 cst = fold_convert (TREE_TYPE (name2), val);
2655 if (TREE_OVERFLOW_P (cst))
2656 cst = drop_tree_overflow (cst);
2657 add_assert_info (asserts, name2, name2, comp_code, cst);
2661 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
2662 && TREE_CODE (val) == INTEGER_CST)
2664 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2665 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
2666 tree val2 = NULL_TREE;
2667 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
2668 wide_int mask = wi::zero (prec);
2669 unsigned int nprec = prec;
2670 enum tree_code rhs_code = ERROR_MARK;
2672 if (is_gimple_assign (def_stmt))
2673 rhs_code = gimple_assign_rhs_code (def_stmt);
2675 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
2676 assert that A != CST1 -+ CST2. */
2677 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2678 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
2680 tree op0 = gimple_assign_rhs1 (def_stmt);
2681 tree op1 = gimple_assign_rhs2 (def_stmt);
2682 if (TREE_CODE (op0) == SSA_NAME
2683 && TREE_CODE (op1) == INTEGER_CST)
2685 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
2686 ? MINUS_EXPR : PLUS_EXPR);
2687 op1 = int_const_binop (reverse_op, val, op1);
2688 if (TREE_OVERFLOW (op1))
2689 op1 = drop_tree_overflow (op1);
2690 add_assert_info (asserts, op0, op0, comp_code, op1);
2694 /* Add asserts for NAME cmp CST and NAME being defined
2695 as NAME = (int) NAME2. */
2696 if (!TYPE_UNSIGNED (TREE_TYPE (val))
2697 && (comp_code == LE_EXPR || comp_code == LT_EXPR
2698 || comp_code == GT_EXPR || comp_code == GE_EXPR)
2699 && gimple_assign_cast_p (def_stmt))
2701 name2 = gimple_assign_rhs1 (def_stmt);
2702 if (CONVERT_EXPR_CODE_P (rhs_code)
2703 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2704 && TYPE_UNSIGNED (TREE_TYPE (name2))
2705 && prec == TYPE_PRECISION (TREE_TYPE (name2))
2706 && (comp_code == LE_EXPR || comp_code == GT_EXPR
2707 || !tree_int_cst_equal (val,
2708 TYPE_MIN_VALUE (TREE_TYPE (val)))))
2711 enum tree_code new_comp_code = comp_code;
2713 cst = fold_convert (TREE_TYPE (name2),
2714 TYPE_MIN_VALUE (TREE_TYPE (val)));
2715 /* Build an expression for the range test. */
2716 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2717 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2718 fold_convert (TREE_TYPE (name2), val));
2719 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2721 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2722 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2723 build_int_cst (TREE_TYPE (name2), 1));
2728 fprintf (dump_file, "Adding assert for ");
2729 print_generic_expr (dump_file, name2);
2730 fprintf (dump_file, " from ");
2731 print_generic_expr (dump_file, tmp);
2732 fprintf (dump_file, "\n");
2735 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2739 /* Add asserts for NAME cmp CST and NAME being defined as
2740 NAME = NAME2 >> CST2.
2742 Extract CST2 from the right shift. */
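/* For instance (illustrative values): from NAME == 0x12 with
   NAME = NAME2 >> 4 we can assert (unsigned) NAME2 - 0x120 <= 0xf,
   i.e. NAME2 is in the range [0x120, 0x12f].  */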
2743 if (rhs_code == RSHIFT_EXPR)
2745 name2 = gimple_assign_rhs1 (def_stmt);
2746 cst2 = gimple_assign_rhs2 (def_stmt);
2747 if (TREE_CODE (name2) == SSA_NAME
2748 && tree_fits_uhwi_p (cst2)
2749 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2750 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2751 && type_has_mode_precision_p (TREE_TYPE (val)))
2753 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2754 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2757 if (val2 != NULL_TREE
2758 && TREE_CODE (val2) == INTEGER_CST
2759 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2763 enum tree_code new_comp_code = comp_code;
2767 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
2769 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
2771 tree type = build_nonstandard_integer_type (prec, 1);
2772 tmp = build1 (NOP_EXPR, type, name2);
2773 val2 = fold_convert (type, val2);
2775 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
2776 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
2777 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
2779 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2782 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2784 if (minval == wi::to_wide (new_val))
2785 new_val = NULL_TREE;
2790 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2791 mask |= wi::to_wide (val2);
2792 if (wi::eq_p (mask, maxval))
2793 new_val = NULL_TREE;
2795 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
2802 fprintf (dump_file, "Adding assert for ");
2803 print_generic_expr (dump_file, name2);
2804 fprintf (dump_file, " from ");
2805 print_generic_expr (dump_file, tmp);
2806 fprintf (dump_file, "\n");
2809 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
2813 /* Add asserts for NAME cmp CST and NAME being defined as
2814 NAME = NAME2 & CST2.
2816 Extract CST2 from the and.
2818 Also handle
2819 NAME = (unsigned) NAME2;
2820 casts where NAME's type is unsigned and has smaller precision
2821 than NAME2's type as if it was NAME = NAME2 & MASK. */
2822 names[0] = NULL_TREE;
2823 names[1] = NULL_TREE;
2825 if (rhs_code == BIT_AND_EXPR
2826 || (CONVERT_EXPR_CODE_P (rhs_code)
2827 && INTEGRAL_TYPE_P (TREE_TYPE (val))
2828 && TYPE_UNSIGNED (TREE_TYPE (val))
2829 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2832 name2 = gimple_assign_rhs1 (def_stmt);
2833 if (rhs_code == BIT_AND_EXPR)
2834 cst2 = gimple_assign_rhs2 (def_stmt);
2837 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
2838 nprec = TYPE_PRECISION (TREE_TYPE (name2));
2840 if (TREE_CODE (name2) == SSA_NAME
2841 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2842 && TREE_CODE (cst2) == INTEGER_CST
2843 && !integer_zerop (cst2)
2845 || TYPE_UNSIGNED (TREE_TYPE (val))))
2847 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
2848 if (gimple_assign_cast_p (def_stmt2))
2850 names[1] = gimple_assign_rhs1 (def_stmt2);
2851 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
2852 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
2853 || (TYPE_PRECISION (TREE_TYPE (name2))
2854 != TYPE_PRECISION (TREE_TYPE (names[1]))))
2855 names[1] = NULL_TREE;
2860 if (names[0] || names[1])
2862 wide_int minv, maxv, valv, cst2v;
2863 wide_int tem, sgnbit;
2864 bool valid_p = false, valn, cst2n;
2865 enum tree_code ccode = comp_code;
2867 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
2868 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
2869 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
2870 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
2871 /* If CST2 doesn't have the most significant bit set,
2872 but VAL is negative, we have a comparison like
2873 if ((x & 0x123) > -4) (always true). Just give up. */
2877 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2879 sgnbit = wi::zero (nprec);
2880 minv = valv & cst2v;
2884 /* Minimum unsigned value for equality is VAL & CST2
2885 (should be equal to VAL, otherwise we probably should
2886 have folded the comparison into false) and
2887 maximum unsigned value is VAL | ~CST2. */
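/* For instance (illustrative 8-bit values): for (X & 0xf0) == 0x40
   the minimum is 0x40 and the maximum is 0x40 | 0x0f == 0x4f.  */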
2888 maxv = valv | ~cst2v;
2893 tem = valv | ~cst2v;
2894 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
2898 sgnbit = wi::zero (nprec);
2901 /* If (VAL | ~CST2) is all ones, handle it as
2902 (X & CST2) < VAL. */
2907 sgnbit = wi::zero (nprec);
2910 if (!cst2n && wi::neg_p (cst2v))
2911 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2920 if (tem == wi::mask (nprec - 1, false, nprec))
2926 sgnbit = wi::zero (nprec);
2931 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
2932 is VAL and maximum unsigned value is ~0. For signed
2933 comparison, if CST2 doesn't have most significant bit
2934 set, handle it similarly. If CST2 has MSB set,
2935 the minimum is the same, and maximum is ~0U/2. */
2938 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
2939 VAL. */
2940 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2944 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2950 /* Find out smallest MINV where MINV > VAL
2951 && (MINV & CST2) == MINV, if any. If VAL is signed and
2952 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
2953 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2956 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2961 /* Minimum unsigned value for <= is 0 and maximum
2962 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
2963 Otherwise, find smallest VAL2 where VAL2 > VAL
2964 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2965 as maximum.
2966 For signed comparison, if CST2 doesn't have most
2967 significant bit set, handle it similarly. If CST2 has
2968 MSB set, the maximum is the same and minimum is INT_MIN. */
2973 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2985 /* Minimum unsigned value for < is 0 and maximum
2986 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
2987 Otherwise, find smallest VAL2 where VAL2 > VAL
2988 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2989 as maximum.
2990 For signed comparison, if CST2 doesn't have most
2991 significant bit set, handle it similarly. If CST2 has
2992 MSB set, the maximum is the same and minimum is INT_MIN. */
3001 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3015 && (maxv - minv) != -1)
3017 tree tmp, new_val, type;
3020 for (i = 0; i < 2; i++)
3023 wide_int maxv2 = maxv;
3025 type = TREE_TYPE (names[i]);
3026 if (!TYPE_UNSIGNED (type))
3028 type = build_nonstandard_integer_type (nprec, 1);
3029 tmp = build1 (NOP_EXPR, type, names[i]);
3033 tmp = build2 (PLUS_EXPR, type, tmp,
3034 wide_int_to_tree (type, -minv));
3035 maxv2 = maxv - minv;
3037 new_val = wide_int_to_tree (type, maxv2);
3041 fprintf (dump_file, "Adding assert for ");
3042 print_generic_expr (dump_file, names[i]);
3043 fprintf (dump_file, " from ");
3044 print_generic_expr (dump_file, tmp);
3045 fprintf (dump_file, "\n");
3048 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3055 /* OP is an operand of a truth value expression which is known to have
3056 a particular value. Register any asserts for OP and for any
3057 operands in OP's defining statement.
3059 If CODE is EQ_EXPR, then we want to register OP is zero (false),
3060 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
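/* For illustration (made-up SSA names): given

     _1 = x_2 > 5;
     _3 = _1 & _4;
     if (_3 != 0)

   calling this function for _3 with CODE == NE_EXPR recurses through
   the BIT_AND_EXPR and, on the true edge, registers the assertion
   x_2 > 5 along with whatever _4's definition implies.  */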
3063 register_edge_assert_for_1 (tree op, enum tree_code code,
3064 edge e, vec<assert_info> &asserts)
3068 enum tree_code rhs_code;
3070 /* We only care about SSA_NAMEs. */
3071 if (TREE_CODE (op) != SSA_NAME)
3074 /* We know that OP will have a zero or nonzero value. */
3075 val = build_int_cst (TREE_TYPE (op), 0);
3076 add_assert_info (asserts, op, op, code, val);
3078 /* Now look at how OP is set. If it's set from a comparison,
3079 a truth operation or some bit operations, then we may be able
3080 to register information about the operands of that assignment. */
3081 op_def = SSA_NAME_DEF_STMT (op);
3082 if (gimple_code (op_def) != GIMPLE_ASSIGN)
3085 rhs_code = gimple_assign_rhs_code (op_def);
3087 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3089 bool invert = (code == EQ_EXPR ? true : false);
3090 tree op0 = gimple_assign_rhs1 (op_def);
3091 tree op1 = gimple_assign_rhs2 (op_def);
3093 if (TREE_CODE (op0) == SSA_NAME)
3094 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3095 if (TREE_CODE (op1) == SSA_NAME)
3096 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3098 else if ((code == NE_EXPR
3099 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3101 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3103 /* Recurse on each operand. */
3104 tree op0 = gimple_assign_rhs1 (op_def);
3105 tree op1 = gimple_assign_rhs2 (op_def);
3106 if (TREE_CODE (op0) == SSA_NAME
3107 && has_single_use (op0))
3108 register_edge_assert_for_1 (op0, code, e, asserts);
3109 if (TREE_CODE (op1) == SSA_NAME
3110 && has_single_use (op1))
3111 register_edge_assert_for_1 (op1, code, e, asserts);
3113 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3114 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3116 /* Recurse, flipping CODE. */
3117 code = invert_tree_comparison (code, false);
3118 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3120 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3122 /* Recurse through the copy. */
3123 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3125 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3127 /* Recurse through the type conversion, unless it is a narrowing
3128 conversion or conversion from non-integral type. */
3129 tree rhs = gimple_assign_rhs1 (op_def);
3130 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3131 && (TYPE_PRECISION (TREE_TYPE (rhs))
3132 <= TYPE_PRECISION (TREE_TYPE (op))))
3133 register_edge_assert_for_1 (rhs, code, e, asserts);
3137 /* Check if comparison
3138 NAME COND_OP INTEGER_CST
3139 has a form of
3140 (X & 11...100..0) COND_OP XX...X00...0
3141 Such a comparison can yield assertions like
3142 X >= XX...X00...0
3143 X <= XX...X11...1
3144 in case of COND_OP being EQ_EXPR or
3145 X < XX...X00...0
3146 X > XX...X11...1
3147 in case of NE_EXPR. */
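/* For instance (illustrative 8-bit values): for (X & 0xf0) == 0x40
   this sets *LOW to 0x40 with GE_EXPR and *HIGH to 0x4f with LE_EXPR,
   i.e. X must be in [0x40, 0x4f] for the comparison to hold.  */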
3150 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3151 tree *new_name, tree *low, enum tree_code *low_code,
3152 tree *high, enum tree_code *high_code)
3154 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3156 if (!is_gimple_assign (def_stmt)
3157 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3160 tree t = gimple_assign_rhs1 (def_stmt);
3161 tree maskt = gimple_assign_rhs2 (def_stmt);
3162 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3165 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3166 wide_int inv_mask = ~mask;
3167 /* A degenerate mask (all zeros or all ones) must have been folded away by now, so don't bother optimizing. */
3168 if (mask == 0 || inv_mask == 0)
3171 /* Assume VALT is INTEGER_CST. */
3172 wi::tree_to_wide_ref val = wi::to_wide (valt);
3174 if ((inv_mask & (inv_mask + 1)) != 0
3175 || (val & mask) != val)
3178 bool is_range = cond_code == EQ_EXPR;
3180 tree type = TREE_TYPE (t);
3181 wide_int min = wi::min_value (type),
3182 max = wi::max_value (type);
3186 *low_code = val == min ? ERROR_MARK : GE_EXPR;
3187 *high_code = val == max ? ERROR_MARK : LE_EXPR;
3191 /* We can still generate an assertion if one of the alternatives
3192 is known to be always false. */
3195 *low_code = (enum tree_code) 0;
3196 *high_code = GT_EXPR;
3198 else if ((val | inv_mask) == max)
3200 *low_code = LT_EXPR;
3201 *high_code = (enum tree_code) 0;
3208 *low = wide_int_to_tree (type, val);
3209 *high = wide_int_to_tree (type, val | inv_mask);
3214 /* Try to register an edge assertion for SSA name NAME on edge E for
3215 the condition COND contributing to the conditional jump pointed to by
3216 SI. */
3219 register_edge_assert_for (tree name, edge e,
3220 enum tree_code cond_code, tree cond_op0,
3221 tree cond_op1, vec<assert_info> &asserts)
3224 enum tree_code comp_code;
3225 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3227 /* Do not attempt to infer anything in names that flow through
3228 abnormal edges. */
3229 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3232 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3238 /* Register ASSERT_EXPRs for name. */
3239 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3240 cond_op1, is_else_edge, asserts);
3243 /* If COND is effectively an equality test of an SSA_NAME against
3244 the value zero or one, then we may be able to assert values
3245 for SSA_NAMEs which flow into COND. */
3247 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3248 statement of NAME we can assert both operands of the BIT_AND_EXPR
3249 have nonzero value. */
3250 if (((comp_code == EQ_EXPR && integer_onep (val))
3251 || (comp_code == NE_EXPR && integer_zerop (val))))
3253 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3255 if (is_gimple_assign (def_stmt)
3256 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3258 tree op0 = gimple_assign_rhs1 (def_stmt);
3259 tree op1 = gimple_assign_rhs2 (def_stmt);
3260 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3261 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3265 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3266 statement of NAME we can assert both operands of the BIT_IOR_EXPR
3268 if (((comp_code == EQ_EXPR && integer_zerop (val))
3269 || (comp_code == NE_EXPR && integer_onep (val))))
3271 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3273 /* For BIT_IOR_EXPR, both operands necessarily have value zero only
3274 if NAME == 0, or if the type precision is one. */
3275 if (is_gimple_assign (def_stmt)
3276 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3277 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3278 || comp_code == EQ_EXPR)))
3280 tree op0 = gimple_assign_rhs1 (def_stmt);
3281 tree op1 = gimple_assign_rhs2 (def_stmt);
3282 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3283 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3287 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
3288 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3289 && TREE_CODE (val) == INTEGER_CST)
3291 enum tree_code low_code, high_code;
3293 if (is_masked_range_test (name, val, comp_code, &name, &low,
3294 &low_code, &high, &high_code))
3296 if (low_code != ERROR_MARK)
3297 register_edge_assert_for_2 (name, e, low_code, name,
3298 low, /*invert*/false, asserts);
3299 if (high_code != ERROR_MARK)
3300 register_edge_assert_for_2 (name, e, high_code, name,
3301 high, /*invert*/false, asserts);
3306 /* Finish the ASSERTS found for E and register them at GSI. */
3309 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3310 vec<assert_info> &asserts)
3312 for (unsigned i = 0; i < asserts.length (); ++i)
3313 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3314 reachable from E. */
3315 if (live_on_edge (e, asserts[i].name))
3316 register_new_assert_for (asserts[i].name, asserts[i].expr,
3317 asserts[i].comp_code, asserts[i].val,
3323 /* Determine whether the outgoing edges of BB should receive an
3324 ASSERT_EXPR for each of the operands of BB's LAST statement.
3325 The last statement of BB must be a COND_EXPR.
3327 If any of the sub-graphs rooted at BB have an interesting use of
3328 the predicate operands, an assert location node is added to the
3329 list of assertions for the corresponding operands. */
3332 find_conditional_asserts (basic_block bb, gcond *last)
3334 gimple_stmt_iterator bsi;
3340 bsi = gsi_for_stmt (last);
3342 /* Look for uses of the operands in each of the sub-graphs
3343 rooted at BB. We need to check each of the outgoing edges
3344 separately, so that we know what kind of ASSERT_EXPR to
3345 insert. */
3346 FOR_EACH_EDGE (e, ei, bb->succs)
3351 /* Register the necessary assertions for each operand in the
3352 conditional predicate. */
3353 auto_vec<assert_info, 8> asserts;
3354 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3355 register_edge_assert_for (op, e,
3356 gimple_cond_code (last),
3357 gimple_cond_lhs (last),
3358 gimple_cond_rhs (last), asserts);
3359 finish_register_edge_assert_for (e, bsi, asserts);
3369 /* Compare two case labels sorting first by the destination bb index
3370 and then by the case value. */
3373 compare_case_labels (const void *p1, const void *p2)
3375 const struct case_info *ci1 = (const struct case_info *) p1;
3376 const struct case_info *ci2 = (const struct case_info *) p2;
3377 int idx1 = ci1->bb->index;
3378 int idx2 = ci2->bb->index;
3382 else if (idx1 == idx2)
3384 /* Make sure the default label is first in a group. */
3385 if (!CASE_LOW (ci1->expr))
3387 else if (!CASE_LOW (ci2->expr))
3390 return tree_int_cst_compare (CASE_LOW (ci1->expr),
3391 CASE_LOW (ci2->expr));
3397 /* Determine whether the outgoing edges of BB should receive an
3398 ASSERT_EXPR for each of the operands of BB's LAST statement.
3399 The last statement of BB must be a SWITCH_EXPR.
3401 If any of the sub-graphs rooted at BB have an interesting use of
3402 the predicate operands, an assert location node is added to the
3403 list of assertions for the corresponding operands. */
3406 find_switch_asserts (basic_block bb, gswitch *last)
3408 gimple_stmt_iterator bsi;
3411 struct case_info *ci;
3412 size_t n = gimple_switch_num_labels (last);
3413 #if GCC_VERSION >= 4000
3414 unsigned int idx;
3415 #else
3416 /* Work around GCC 3.4 bug (PR 37086). */
3417 volatile unsigned int idx;
3418 #endif
3420 bsi = gsi_for_stmt (last);
3421 op = gimple_switch_index (last);
3422 if (TREE_CODE (op) != SSA_NAME)
3425 /* Build a vector of case labels sorted by destination label. */
3426 ci = XNEWVEC (struct case_info, n);
3427 for (idx = 0; idx < n; ++idx)
3429 ci[idx].expr = gimple_switch_label (last, idx);
3430 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
3432 edge default_edge = find_edge (bb, ci[0].bb);
3433 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3435 for (idx = 0; idx < n; ++idx)
3438 tree cl = ci[idx].expr;
3439 basic_block cbb = ci[idx].bb;
3441 min = CASE_LOW (cl);
3442 max = CASE_HIGH (cl);
3444 /* If there are multiple case labels with the same destination
3445 we need to combine them to a single value range for the edge. */
3446 if (idx + 1 < n && cbb == ci[idx + 1].bb)
3448 /* Skip labels until the last of the group. */
3451 } while (idx < n && cbb == ci[idx].bb);
3454 /* Pick up the maximum of the case label range. */
3455 if (CASE_HIGH (ci[idx].expr))
3456 max = CASE_HIGH (ci[idx].expr);
3458 max = CASE_LOW (ci[idx].expr);
3461 /* Can't extract a useful assertion out of a range that includes the
3462 default label. */
3463 if (min == NULL_TREE)
3466 /* Find the edge to register the assert expr on. */
3467 e = find_edge (bb, cbb);
3469 /* Register the necessary assertions for the operand in the
3470 SWITCH_EXPR. */
3471 auto_vec<assert_info, 8> asserts;
3472 register_edge_assert_for (op, e,
3473 max ? GE_EXPR : EQ_EXPR,
3474 op, fold_convert (TREE_TYPE (op), min),
3477 register_edge_assert_for (op, e, LE_EXPR, op,
3478 fold_convert (TREE_TYPE (op), max),
3480 finish_register_edge_assert_for (e, bsi, asserts);
3485 if (!live_on_edge (default_edge, op))
3488 /* Now, along the default label, register assertions that correspond
3489 to the anti-range of each case label. */
3490 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
3491 if (insertion_limit == 0)
3494 /* We can't do this if the default case shares a label with another case. */
3495 tree default_cl = gimple_switch_default_label (last);
3496 for (idx = 1; idx < n; idx++)
3499 tree cl = gimple_switch_label (last, idx);
3500 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3503 min = CASE_LOW (cl);
3504 max = CASE_HIGH (cl);
3506 /* Combine contiguous case ranges to reduce the number of assertions
3507 to insert. */
3508 for (idx = idx + 1; idx < n; idx++)
3510 tree next_min, next_max;
3511 tree next_cl = gimple_switch_label (last, idx);
3512 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3515 next_min = CASE_LOW (next_cl);
3516 next_max = CASE_HIGH (next_cl);
3518 wide_int difference = (wi::to_wide (next_min)
3519 - wi::to_wide (max ? max : min));
3520 if (wi::eq_p (difference, 1))
3521 max = next_max ? next_max : next_min;
3527 if (max == NULL_TREE)
3529 /* Register the assertion OP != MIN. */
3530 auto_vec<assert_info, 8> asserts;
3531 min = fold_convert (TREE_TYPE (op), min);
3532 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3534 finish_register_edge_assert_for (default_edge, bsi, asserts);
3538 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3539 which will give OP the anti-range ~[MIN,MAX]. */
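/* For instance (illustrative values): if the non-default labels cover
   the contiguous range 3 ... 7, we register (unsigned) OP - 3 > 4 on
   the default edge, i.e. OP is outside [3, 7] there.  */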
3540 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3541 min = fold_convert (TREE_TYPE (uop), min);
3542 max = fold_convert (TREE_TYPE (uop), max);
3544 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3545 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3546 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3547 NULL, default_edge, bsi);
3550 if (--insertion_limit == 0)
3556 /* Traverse all the statements in block BB looking for statements that
3557 may generate useful assertions for the SSA names in their operand.
3558 If a statement produces a useful assertion A for name N_i, then the
3559 list of assertions already generated for N_i is scanned to
3560 determine if A is actually needed.
3562 If N_i already had the assertion A at a location dominating the
3563 current location, then nothing needs to be done. Otherwise, the
3564 new location for A is recorded instead.
3566 1- For every statement S in BB, all the variables used by S are
3567 added to bitmap FOUND_IN_SUBGRAPH.
3569 2- If statement S uses an operand N in a way that exposes a known
3570 value range for N, then if N was not already generated by an
3571 ASSERT_EXPR, create a new assert location for N. For instance,
3572 if N is a pointer and the statement dereferences it, we can
3573 assume that N is not NULL.
3575 3- COND_EXPRs are a special case of #2. We can derive range
3576 information from the predicate but need to insert different
3577 ASSERT_EXPRs for each of the sub-graphs rooted at the
3578 conditional block. If the last statement of BB is a conditional
3579 expression of the form 'X op Y', then
3581 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3583 b) If the conditional is the only entry point to the sub-graph
3584 corresponding to the THEN_CLAUSE, recurse into it. On
3585 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3586 an ASSERT_EXPR is added for the corresponding variable.
3588 c) Repeat step (b) on the ELSE_CLAUSE.
3590 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3592 For instance,
3594 if (a == 9)
3595 b = a;
3596 else
3597 b = c + 1;
3599 In this case, an assertion on the THEN clause is useful to
3600 determine that 'a' is always 9 on that edge. However, an assertion
3601 on the ELSE clause would be unnecessary.
3603 4- If BB does not end in a conditional expression, then we recurse
3604 into BB's dominator children.
3606 At the end of the recursive traversal, every SSA name will have a
3607 list of locations where ASSERT_EXPRs should be added. When a new
3608 location for name N is found, it is registered by calling
3609 register_new_assert_for. That function keeps track of all the
3610 registered assertions to prevent adding unnecessary assertions.
3611 For instance, if a pointer P_4 is dereferenced more than once in a
3612 dominator tree, only the location dominating all the dereference of
3613 P_4 will receive an ASSERT_EXPR. */
3616 find_assert_locations_1 (basic_block bb, sbitmap live)
3620 last = last_stmt (bb);
3622 /* If BB's last statement is a conditional statement involving integer
3623 operands, determine if we need to add ASSERT_EXPRs. */
3625 && gimple_code (last) == GIMPLE_COND
3626 && !fp_predicate (last)
3627 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3628 find_conditional_asserts (bb, as_a <gcond *> (last));
3630 /* If BB's last statement is a switch statement involving integer
3631 operands, determine if we need to add ASSERT_EXPRs. */
3633 && gimple_code (last) == GIMPLE_SWITCH
3634 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3635 find_switch_asserts (bb, as_a <gswitch *> (last));
3637 /* Traverse all the statements in BB marking used names and looking
3638 for statements that may infer assertions for their used operands. */
3639 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3646 stmt = gsi_stmt (si);
3648 if (is_gimple_debug (stmt))
3651 /* See if we can derive an assertion for any of STMT's operands. */
3652 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3655 enum tree_code comp_code;
3657 /* If op is not live beyond this stmt, do not bother to insert
3658 asserts for it. */
3659 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
3662 /* If OP is used in such a way that we can infer a value
3663 range for it, and we don't find a previous assertion for
3664 it, create a new assertion location node for OP. */
3665 if (infer_value_range (stmt, op, &comp_code, &value))
3667 /* If we are able to infer a nonzero value range for OP,
3668 then walk backwards through the use-def chain to see if OP
3669 was set via a typecast.
3671 If so, then we can also infer a nonzero value range
3672 for the operand of the NOP_EXPR. */
3673 if (comp_code == NE_EXPR && integer_zerop (value))
3676 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3678 while (is_gimple_assign (def_stmt)
3679 && CONVERT_EXPR_CODE_P
3680 (gimple_assign_rhs_code (def_stmt))
3682 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3684 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3686 t = gimple_assign_rhs1 (def_stmt);
3687 def_stmt = SSA_NAME_DEF_STMT (t);
3689 /* Note we want to register the assert for the
3690 operand of the NOP_EXPR after SI, not after the
3691 conversion. */
3692 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3693 register_new_assert_for (t, t, comp_code, value,
3698 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3703 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3704 bitmap_set_bit (live, SSA_NAME_VERSION (op));
3705 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3706 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3709 /* Traverse all PHI nodes in BB, updating live. */
3710 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3713 use_operand_p arg_p;
3715 gphi *phi = si.phi ();
3716 tree res = gimple_phi_result (phi);
3718 if (virtual_operand_p (res))
3721 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3723 tree arg = USE_FROM_PTR (arg_p);
3724 if (TREE_CODE (arg) == SSA_NAME)
3725 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3728 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3732 /* Do an RPO walk over the function computing SSA name liveness
3733 on-the-fly and deciding on assert expressions to insert. */
3736 find_assert_locations (void)
3738 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3739 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3740 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3743 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3744 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3745 for (i = 0; i < rpo_cnt; ++i)
3748 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3749 the order in which we compute liveness and insert asserts, we would
3750 otherwise fail to insert asserts into the loop latch. */
3752 FOR_EACH_LOOP (loop, 0)
3754 i = loop->latch->index;
3755 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3756 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3757 !gsi_end_p (gsi); gsi_next (&gsi))
3759 gphi *phi = gsi.phi ();
3760 if (virtual_operand_p (gimple_phi_result (phi)))
3762 tree arg = gimple_phi_arg_def (phi, j);
3763 if (TREE_CODE (arg) == SSA_NAME)
3765 if (live[i] == NULL)
3767 live[i] = sbitmap_alloc (num_ssa_names);
3768 bitmap_clear (live[i]);
3770 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
3775 for (i = rpo_cnt - 1; i >= 0; --i)
3777 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3783 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3784 bitmap_clear (live[rpo[i]]);
3787 /* Process BB and update the live information with uses in
3788 this block. */
3789 find_assert_locations_1 (bb, live[rpo[i]]);
3791 /* Merge liveness into the predecessor blocks and free it. */
3792 if (!bitmap_empty_p (live[rpo[i]]))
3795 FOR_EACH_EDGE (e, ei, bb->preds)
3797 int pred = e->src->index;
3798 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3803 live[pred] = sbitmap_alloc (num_ssa_names);
3804 bitmap_clear (live[pred]);
3806 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
3808 if (bb_rpo[pred] < pred_rpo)
3809 pred_rpo = bb_rpo[pred];
3812 /* Record the RPO number of the last visited block that needs
3813 live information from this block. */
3814 last_rpo[rpo[i]] = pred_rpo;
3818 sbitmap_free (live[rpo[i]]);
3819 live[rpo[i]] = NULL;
3822 /* We can free all successors' live bitmaps if all their
3823 predecessors have been visited already. */
3824 FOR_EACH_EDGE (e, ei, bb->succs)
3825 if (last_rpo[e->dest->index] == i
3826 && live[e->dest->index])
3828 sbitmap_free (live[e->dest->index]);
3829 live[e->dest->index] = NULL;
3834 XDELETEVEC (bb_rpo);
3835 XDELETEVEC (last_rpo);
3836 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
3838 sbitmap_free (live[i]);
3842 /* Create an ASSERT_EXPR for NAME and insert it in the location
3843 indicated by LOC. Return true if we made any edge insertions. */
3846 process_assert_insertions_for (tree name, assert_locus *loc)
3848 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3851 gimple *assert_stmt;
3855 /* If we have X <=> X do not insert an assert expr for that. */
3856 if (loc->expr == loc->val)
3859 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3860 assert_stmt = build_assert_expr_for (cond, name);
3863 /* We have been asked to insert the assertion on an edge. This
3864 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3865 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3866 || (gimple_code (gsi_stmt (loc->si))
3869 gsi_insert_on_edge (loc->e, assert_stmt);
3873 /* If the stmt iterator points at the end then this is an insertion
3874 at the beginning of a block. */
3875 if (gsi_end_p (loc->si))
3877 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3878 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3882 /* Otherwise, we can insert right after LOC->SI as long as the
3883 statement does not end the basic block. */
3884 stmt = gsi_stmt (loc->si);
3885 if (!stmt_ends_bb_p (stmt))
3887 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3891 /* If STMT must be the last statement in BB, we can only insert new
3892 assertions on the non-abnormal edge out of BB. Note that since
3893 STMT is not control flow, there may only be one non-abnormal/eh edge
3894 out of BB. */
3895 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3896 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3898 gsi_insert_on_edge (e, assert_stmt);
3905 /* Qsort helper for sorting assert locations. If stable is true, don't
3906 use iterative_hash_expr because it can be unstable for -fcompare-debug;
3907 on the other hand, some pointers might be NULL. */
3909 template <bool stable>
3911 compare_assert_loc (const void *pa, const void *pb)
3913 assert_locus * const a = *(assert_locus * const *)pa;
3914 assert_locus * const b = *(assert_locus * const *)pb;
3916 /* If stable, some asserts might be optimized away already, sort
3917 them last. */
3926 if (a->e == NULL && b->e != NULL)
3928 else if (a->e != NULL && b->e == NULL)
3931 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3932 no need to test both a->e and b->e. */
3934 /* Sort by destination index. */
3937 else if (a->e->dest->index > b->e->dest->index)
3939 else if (a->e->dest->index < b->e->dest->index)
3942 /* Sort by comp_code. */
3943 if (a->comp_code > b->comp_code)
3945 else if (a->comp_code < b->comp_code)
3950 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3951 uses DECL_UID of the VAR_DECL, so sorting might differ between
3952 -g and -g0. When doing the removal of redundant assert exprs
3953 and commonization to successors, this does not matter, but
3954 the final sort needs to be stable. */
3962 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3963 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3966 /* Break the tie using hashing and source/bb index. */
3968 return (a->e != NULL
3969 ? a->e->src->index - b->e->src->index
3970 : a->bb->index - b->bb->index);
3971 return ha > hb ? 1 : -1;
3974 /* Process all the insertions registered for every name N_i registered
3975 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3976 found in ASSERTS_FOR[i]. */
3979 process_assert_insertions (void)
3983 bool update_edges_p = false;
3984 int num_asserts = 0;
3986 if (dump_file && (dump_flags & TDF_DETAILS))
3987 dump_all_asserts (dump_file);
3989 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3991 assert_locus *loc = asserts_for[i];
3994 auto_vec<assert_locus *, 16> asserts;
3995 for (; loc; loc = loc->next)
3996 asserts.safe_push (loc);
3997 asserts.qsort (compare_assert_loc<false>);
3999 /* Push down common asserts to successors and remove redundant ones. */
4001 assert_locus *common = NULL;
4002 unsigned commonj = 0;
4003 for (unsigned j = 0; j < asserts.length (); ++j)
4009 || loc->e->dest != common->e->dest
4010 || loc->comp_code != common->comp_code
4011 || ! operand_equal_p (loc->val, common->val, 0)
4012 || ! operand_equal_p (loc->expr, common->expr, 0))
4018 else if (loc->e == asserts[j-1]->e)
4020 /* Remove duplicate asserts. */
4021 if (commonj == j - 1)
4026 free (asserts[j-1]);
4027 asserts[j-1] = NULL;
4032 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4034 /* We have the same assertion on all incoming edges of a BB.
4035 Insert it at the beginning of that block. */
4036 loc->bb = loc->e->dest;
4038 loc->si = gsi_none ();
4040 /* Clear asserts commoned. */
4041 for (; commonj != j; ++commonj)
4042 if (asserts[commonj])
4044 free (asserts[commonj]);
4045 asserts[commonj] = NULL;
4051 /* The asserts vector sorting above might be unstable for
4052 -fcompare-debug; sort again to ensure a stable sort. */
4053 asserts.qsort (compare_assert_loc<true>);
4054 for (unsigned j = 0; j < asserts.length (); ++j)
4059 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4066 gsi_commit_edge_inserts ();
4068 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4073 /* Traverse the flowgraph looking for conditional jumps to insert range
4074 expressions. These range expressions are meant to provide information
4075 to optimizations that need to reason in terms of value ranges. They
4076 will not be expanded into RTL. For instance, given:
4078 x = ...
4079 y = ...
4080 if (x < y)
4081 y = x - 2;
4082 else
4083 x = y + 3;
4085 this pass will transform the code into:
4087 x = ...
4088 y = ...
4089 if (x < y)
4090 {
4091 x = ASSERT_EXPR <x, x < y>
4092 y = x - 2
4093 }
4094 else
4095 {
4096 y = ASSERT_EXPR <y, x >= y>
4097 x = y + 3
4098 }
4100 The idea is that once copy and constant propagation have run, other
4101 optimizations will be able to determine what ranges of values can 'x'
4102 take in different paths of the code, simply by checking the reaching
4103 definition of 'x'. */
4106 insert_range_assertions (void)
4108 need_assert_for = BITMAP_ALLOC (NULL);
4109 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4111 calculate_dominance_info (CDI_DOMINATORS);
4113 find_assert_locations ();
4114 if (!bitmap_empty_p (need_assert_for))
4116 process_assert_insertions ();
4117 update_ssa (TODO_update_ssa_no_phi);
4120 if (dump_file && (dump_flags & TDF_DETAILS))
4122 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4123 dump_function_to_file (current_function_decl, dump_file, dump_flags);
4127 BITMAP_FREE (need_assert_for);
4130 class vrp_prop : public ssa_propagation_engine
4133 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4134 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4136 void vrp_initialize (void);
4137 void vrp_finalize (bool);
4138 void check_all_array_refs (void);
4139 void check_array_ref (location_t, tree, bool);
4140 void check_mem_ref (location_t, tree, bool);
4141 void search_for_addr_array (tree, location_t);
4143 class vr_values vr_values;
4144 /* Temporary delegator to minimize code churn. */
4145 value_range *get_value_range (const_tree op)
4146 { return vr_values.get_value_range (op); }
4147 void set_defs_to_varying (gimple *stmt)
4148 { return vr_values.set_defs_to_varying (stmt); }
4149 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4150 tree *output_p, value_range *vr)
4151 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4152 bool update_value_range (const_tree op, value_range *vr)
4153 { return vr_values.update_value_range (op, vr); }
4154 void extract_range_basic (value_range *vr, gimple *stmt)
4155 { vr_values.extract_range_basic (vr, stmt); }
4156 void extract_range_from_phi_node (gphi *phi, value_range *vr)
4157 { vr_values.extract_range_from_phi_node (phi, vr); }
4159 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
4160 and "struct" hacks. If VRP can determine that the
4161 array subscript is a constant, check if it is outside valid
4162 range. If the array subscript is a RANGE, warn if it is
4163 non-overlapping with valid range.
4164 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
4167 vrp_prop::check_array_ref (location_t location, tree ref,
4168 bool ignore_off_by_one)
4170 const value_range *vr = NULL;
4171 tree low_sub, up_sub;
4172 tree low_bound, up_bound, up_bound_p1;
4174 if (TREE_NO_WARNING (ref))
4177 low_sub = up_sub = TREE_OPERAND (ref, 1);
4178 up_bound = array_ref_up_bound (ref);
4181 || TREE_CODE (up_bound) != INTEGER_CST
4182 || (warn_array_bounds < 2
4183 && array_at_struct_end_p (ref)))
4185 /* Accesses to trailing arrays via pointers may access storage
4186 beyond the type's array bounds. For such arrays, or for flexible
4187 array members, as well as for other arrays of an unknown size,
4188 replace the upper bound with a more permissive one that assumes
4189 the size of the largest object is PTRDIFF_MAX. */
4190 tree eltsize = array_ref_element_size (ref);
4192 if (TREE_CODE (eltsize) != INTEGER_CST
4193 || integer_zerop (eltsize))
4195 up_bound = NULL_TREE;
4196 up_bound_p1 = NULL_TREE;
4200 tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4201 tree arg = TREE_OPERAND (ref, 0);
4204 if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4205 maxbound = wide_int_to_tree (sizetype,
4206 wi::sub (wi::to_wide (maxbound),
4209 maxbound = fold_convert (sizetype, maxbound);
4211 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4213 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4214 build_int_cst (ptrdiff_type_node, 1));
4218 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4219 build_int_cst (TREE_TYPE (up_bound), 1));
4221 low_bound = array_ref_low_bound (ref);
4223 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4225 bool warned = false;
4227 /* Empty array. */
4228 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4229 warned = warning_at (location, OPT_Warray_bounds,
4230 "array subscript %E is above array bounds of %qT",
4233 if (TREE_CODE (low_sub) == SSA_NAME)
4235 vr = get_value_range (low_sub);
4236 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4238 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
4239 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
4243 if (vr && vr->type == VR_ANTI_RANGE)
4246 && TREE_CODE (up_sub) == INTEGER_CST
4247 && (ignore_off_by_one
4248 ? tree_int_cst_lt (up_bound, up_sub)
4249 : tree_int_cst_le (up_bound, up_sub))
4250 && TREE_CODE (low_sub) == INTEGER_CST
4251 && tree_int_cst_le (low_sub, low_bound))
4252 warned = warning_at (location, OPT_Warray_bounds,
4253 "array subscript [%E, %E] is outside "
4254 "array bounds of %qT",
4255 low_sub, up_sub, artype);
4258 && TREE_CODE (up_sub) == INTEGER_CST
4259 && (ignore_off_by_one
4260 ? !tree_int_cst_le (up_sub, up_bound_p1)
4261 : !tree_int_cst_le (up_sub, up_bound)))
4263 if (dump_file && (dump_flags & TDF_DETAILS))
4265 fprintf (dump_file, "Array bound warning for ");
4266 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4267 fprintf (dump_file, "\n");
4269 warned = warning_at (location, OPT_Warray_bounds,
4270 "array subscript %E is above array bounds of %qT",
4273 else if (TREE_CODE (low_sub) == INTEGER_CST
4274 && tree_int_cst_lt (low_sub, low_bound))
4276 if (dump_file && (dump_flags & TDF_DETAILS))
4278 fprintf (dump_file, "Array bound warning for ");
4279 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4280 fprintf (dump_file, "\n");
4282 warned = warning_at (location, OPT_Warray_bounds,
4283 "array subscript %E is below array bounds of %qT",
4289 ref = TREE_OPERAND (ref, 0);
4292 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
4294 TREE_NO_WARNING (ref) = 1;
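/* As a rough illustration of the checks above (diagnostic wording
   approximate), given

     int a[10];
     int f (void) { return a[10]; }

   the read is diagnosed with "array subscript 10 is above array bounds
   of 'int[10]'", while the address computation &a[10] is accepted when
   IGNORE_OFF_BY_ONE is set, since forming a pointer one past the last
   element is valid C.  */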
4298 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4299 references to string constants. If VRP can determine that the array
4300 subscript is a constant, check if it is outside valid range.
4301 If the array subscript is a RANGE, warn if it is non-overlapping
4302 with valid range.
4303 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4304 (used to allow one-past-the-end indices for code that takes
4305 the address of the just-past-the-end element of an array). */
4308 vrp_prop::check_mem_ref (location_t location, tree ref,
4309 bool ignore_off_by_one)
4311 if (TREE_NO_WARNING (ref))
4314 tree arg = TREE_OPERAND (ref, 0);
4315 /* The constant and variable offset of the reference. */
4316 tree cstoff = TREE_OPERAND (ref, 1);
4317 tree varoff = NULL_TREE;
4319 const offset_int maxobjsize = tree_to_shwi (max_object_size ());
4321 /* The array or string constant bounds in bytes. Initially set
4322 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
4323 computed. */
4324 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
4326 /* The minimum and maximum intermediate offset. For a reference
4327 to be valid, not only must the final offset/subscript be
4328 in bounds but all intermediate offsets must be as well.
4329 GCC may be able to deal gracefully with such out-of-bounds
4330 offsets so the checking is only enabled at -Warray-bounds=2
4331 where it may help detect bugs in uses of the intermediate
4332 offsets that could otherwise not be detectable. */
4333 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
4334 offset_int extrema[2] = { 0, wi::abs (ioff) };
4336 /* The range of the byte offset into the reference. */
4337 offset_int offrange[2] = { 0, 0 };
4339 const value_range *vr = NULL;
4341 /* Determine the offsets and increment OFFRANGE for the bounds of each.
4342 The loop computes the range of the final offset for expressions
4343 such as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs
4344 in some range. */
4345 while (TREE_CODE (arg) == SSA_NAME)
4347 gimple *def = SSA_NAME_DEF_STMT (arg);
4348 if (!is_gimple_assign (def))
4351 tree_code code = gimple_assign_rhs_code (def);
4352 if (code == POINTER_PLUS_EXPR)
4354 arg = gimple_assign_rhs1 (def);
4355 varoff = gimple_assign_rhs2 (def);
4357 else if (code == ASSERT_EXPR)
4359 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
4365 /* VAROFF should always be an SSA_NAME here (and not even
4366 INTEGER_CST) but there's no point in taking chances. */
4367 if (TREE_CODE (varoff) != SSA_NAME)
4370 vr = get_value_range (varoff);
4371 if (!vr || vr->type == VR_UNDEFINED || !vr->min || !vr->max)
4374 if (TREE_CODE (vr->min) != INTEGER_CST
4375 || TREE_CODE (vr->max) != INTEGER_CST)
4378 if (vr->type == VR_RANGE)
4380 if (tree_int_cst_lt (vr->min, vr->max))
4383 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min));
4385 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max));
4399 /* Conservatively add [-MAXOBJSIZE - 1, MAXOBJSIZE]
4400 to OFFRANGE. */
4401 offrange[0] += arrbounds[0];
4402 offrange[1] += arrbounds[1];
4407 /* For an anti-range, analogously to the above, conservatively
4408 add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE. */
4409 offrange[0] += arrbounds[0];
4410 offrange[1] += arrbounds[1];
4413 /* Keep track of the minimum and maximum offset. */
4414 if (offrange[1] < 0 && offrange[1] < extrema[0])
4415 extrema[0] = offrange[1];
4416 if (offrange[0] > 0 && offrange[0] > extrema[1])
4417 extrema[1] = offrange[0];
4419 if (offrange[0] < arrbounds[0])
4420 offrange[0] = arrbounds[0];
4422 if (offrange[1] > arrbounds[1])
4423 offrange[1] = arrbounds[1];
4426 if (TREE_CODE (arg) == ADDR_EXPR)
4428 arg = TREE_OPERAND (arg, 0);
4429 if (TREE_CODE (arg) != STRING_CST
4430 && TREE_CODE (arg) != VAR_DECL)
4436 /* The type of the object being referred to. It can be an array,
4437 string literal, or a non-array type when the MEM_REF represents
4438 a reference/subscript via a pointer to an object that is not
4439 an element of an array. References to members of structs and
4440 unions are excluded because MEM_REF doesn't make it possible
4441 to identify the member where the reference originated.
4442 Incomplete types are excluded as well because their size is
4443 not known. */
4444 tree reftype = TREE_TYPE (arg);
4445 if (POINTER_TYPE_P (reftype)
4446 || !COMPLETE_TYPE_P (reftype)
4447 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST
4448 || RECORD_OR_UNION_TYPE_P (reftype))
4452 if (TREE_CODE (reftype) == ARRAY_TYPE)
4454 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
4456 if (tree dom = TYPE_DOMAIN (reftype))
4458 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
4459 if (array_at_struct_end_p (arg)
4460 || !bnds[0] || !bnds[1])
4463 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4467 arrbounds[0] = wi::to_offset (bnds[0]) * eltsize;
4468 arrbounds[1] = (wi::to_offset (bnds[1]) + 1) * eltsize;
4474 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4477 if (TREE_CODE (ref) == MEM_REF)
4479 /* For MEM_REF determine a tighter bound of the non-array
4480 element type. */
4481 tree eltype = TREE_TYPE (reftype);
4482 while (TREE_CODE (eltype) == ARRAY_TYPE)
4483 eltype = TREE_TYPE (eltype);
4484 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
4491 arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype));
4494 offrange[0] += ioff;
4495 offrange[1] += ioff;
4497 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4498 is set (when taking the address of the one-past-last element
4499 of an array) but always use the stricter bound in diagnostics. */
4500 offset_int ubound = arrbounds[1];
4501 if (ignore_off_by_one)
4502 ubound += 1;
4504 if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
4506 /* Treat a reference to a non-array object as one to an array
4507 of a single element. */
4508 if (TREE_CODE (reftype) != ARRAY_TYPE)
4509 reftype = build_array_type_nelts (reftype, 1);
4511 if (TREE_CODE (ref) == MEM_REF)
4513 /* Extract the element type out of MEM_REF and use its size
4514 to compute the index to print in the diagnostic; arrays
4515 in MEM_REF don't mean anything. */
4516 tree type = TREE_TYPE (ref);
4517 while (TREE_CODE (type) == ARRAY_TYPE)
4518 type = TREE_TYPE (type);
4519 tree size = TYPE_SIZE_UNIT (type);
4520 offrange[0] = offrange[0] / wi::to_offset (size);
4521 offrange[1] = offrange[1] / wi::to_offset (size);
4525 /* For anything other than MEM_REF, compute the index to
4526 print in the diagnostic as the offset over element size. */
4527 offrange[0] = offrange[0] / eltsize;
4528 offrange[1] = offrange[1] / eltsize;
4532 if (offrange[0] == offrange[1])
4533 warned = warning_at (location, OPT_Warray_bounds,
4534 "array subscript %wi is outside array bounds "
4536 offrange[0].to_shwi (), reftype);
4538 warned = warning_at (location, OPT_Warray_bounds,
4539 "array subscript [%wi, %wi] is outside "
4540 "array bounds of %qT",
4541 offrange[0].to_shwi (),
4542 offrange[1].to_shwi (), reftype);
4543 if (warned && DECL_P (arg))
4544 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
4546 TREE_NO_WARNING (ref) = 1;
4550 if (warn_array_bounds < 2)
4551 return;
4553 /* At level 2 check also intermediate offsets. */
4554 for (int i = 0; i != 2; ++i)
4555 if (extrema[i] < -arrbounds[1] || extrema[i] > ubound)
4557 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
4559 warning_at (location, OPT_Warray_bounds,
4560 "intermediate array offset %wi is outside array bounds "
4563 TREE_NO_WARNING (ref) = 1;
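/* A rough sketch of the level-2 checking above (IL simplified; it
   assumes VRP has derived I's range from some guard):

     extern char a[8];

     char
     f (int i)             assume i in [12, 16]
     {
       char *p = a + i;    intermediate offset is at least 12, past a[8]
       return p[-12];      final offset in [0, 4] is back in bounds
     }

   -Warray-bounds=1 checks only the final offset; level 2 additionally
   diagnoses the out-of-bounds intermediate offset through P.  */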
4567 /* If the expression T, located at LOCATION, computes the address of
4568 an ARRAY_REF or MEM_REF, call check_array_ref/check_mem_ref on it. */
4571 vrp_prop::search_for_addr_array (tree t, location_t location)
4573 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
4576 if (TREE_CODE (t) == ARRAY_REF)
4577 check_array_ref (location, t, true /*ignore_off_by_one*/);
4578 else if (TREE_CODE (t) == MEM_REF)
4579 check_mem_ref (location, t, true /*ignore_off_by_one*/);
4581 t = TREE_OPERAND (t, 0);
4583 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
4585 if (TREE_CODE (t) != MEM_REF
4586 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
4587 || TREE_NO_WARNING (t))
4590 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4591 tree low_bound, up_bound, el_sz;
4592 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4593 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4594 || !TYPE_DOMAIN (TREE_TYPE (tem)))
4597 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4598 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4599 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4600 if (!low_bound
4601 || TREE_CODE (low_bound) != INTEGER_CST
4602 || !up_bound
4603 || TREE_CODE (up_bound) != INTEGER_CST
4604 || !el_sz
4605 || TREE_CODE (el_sz) != INTEGER_CST)
4608 offset_int idx;
4609 if (!mem_ref_offset (t).is_constant (&idx))
4612 bool warned = false;
4613 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4614 if (idx < 0)
4615 {
4616 if (dump_file && (dump_flags & TDF_DETAILS))
4618 fprintf (dump_file, "Array bound warning for ");
4619 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4620 fprintf (dump_file, "\n");
4622 warned = warning_at (location, OPT_Warray_bounds,
4623 "array subscript %wi is below "
4624 "array bounds of %qT",
4625 idx.to_shwi (), TREE_TYPE (tem));
4627 else if (idx > (wi::to_offset (up_bound)
4628 - wi::to_offset (low_bound) + 1))
4630 if (dump_file && (dump_flags & TDF_DETAILS))
4632 fprintf (dump_file, "Array bound warning for ");
4633 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4634 fprintf (dump_file, "\n");
4636 warned = warning_at (location, OPT_Warray_bounds,
4637 "array subscript %wu is above "
4638 "array bounds of %qT",
4639 idx.to_uhwi (), TREE_TYPE (tem));
4645 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4647 TREE_NO_WARNING (t) = 1;
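/* E.g. (illustrative; diagnostic wording approximate):

     int a[4];
     int *p = &a[4];    accepted: one past the end is allowed here
     int *q = &a[5];    diagnosed: subscript 5 is above the bounds
                        of int[4]  */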
4651 /* walk_tree() callback that checks if *TP is
4652 an ARRAY_REF inside an ADDR_EXPR (in which an array
4653 subscript one outside the valid range is allowed). Call
4654 check_array_ref for each ARRAY_REF found. The location is
4655 in DATA. */
4658 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4661 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4662 location_t location;
4664 if (EXPR_HAS_LOCATION (t))
4665 location = EXPR_LOCATION (t);
4667 location = gimple_location (wi->stmt);
4669 *walk_subtree = TRUE;
4671 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4672 if (TREE_CODE (t) == ARRAY_REF)
4673 vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
4674 else if (TREE_CODE (t) == MEM_REF)
4675 vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4676 else if (TREE_CODE (t) == ADDR_EXPR)
4678 vrp_prop->search_for_addr_array (t, location);
4679 *walk_subtree = FALSE;
4685 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4686 to walk over all statements of all reachable BBs and call
4687 check_array_bounds on them. */
4689 class check_array_bounds_dom_walker : public dom_walker
4692 check_array_bounds_dom_walker (vrp_prop *prop)
4693 : dom_walker (CDI_DOMINATORS,
4694 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4695 flags, so that we can merge in information on
4696 non-executable edges from vrp_folder. */
4697 REACHABLE_BLOCKS_PRESERVING_FLAGS),
4699 ~check_array_bounds_dom_walker () {}
4701 edge before_dom_children (basic_block) FINAL OVERRIDE;
4707 /* Implementation of dom_walker::before_dom_children.
4709 Walk over all statements of BB and call check_array_bounds on them,
4710 and determine if there's a unique successor edge. */
4713 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4715 gimple_stmt_iterator si;
4716 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4718 gimple *stmt = gsi_stmt (si);
4719 struct walk_stmt_info wi;
4720 if (!gimple_has_location (stmt)
4721 || is_gimple_debug (stmt))
4724 memset (&wi, 0, sizeof (wi));
4728 walk_gimple_op (stmt, check_array_bounds, &wi);
4731 /* Determine if there's a unique successor edge, and if so, return
4732 that back to dom_walker, ensuring that we don't visit blocks that
4733 became unreachable during the VRP propagation
4734 (PR tree-optimization/83312). */
4735 return find_taken_edge (bb, NULL_TREE);
4738 /* Walk over all statements of all reachable BBs and call check_array_bounds
4739 on them. */
4742 vrp_prop::check_all_array_refs ()
4744 check_array_bounds_dom_walker w (this);
4745 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4748 /* Return true if all imm uses of VAR are either in STMT, or
4749 feed (optionally through a chain of single imm uses) GIMPLE_COND
4750 in basic block COND_BB. */
4753 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4755 use_operand_p use_p, use2_p;
4756 imm_use_iterator iter;
4758 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4759 if (USE_STMT (use_p) != stmt)
4761 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
4762 if (is_gimple_debug (use_stmt))
4764 while (is_gimple_assign (use_stmt)
4765 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4766 && single_imm_use (gimple_assign_lhs (use_stmt),
4767 &use2_p, &use_stmt2))
4768 use_stmt = use_stmt2;
4769 if (gimple_code (use_stmt) != GIMPLE_COND
4770 || gimple_bb (use_stmt) != cond_bb)
4776 /* Handle
4777 _4 = x_3 & 31;
4778 if (_4 != 0)
4779 goto <bb 6>;
4780 else
4781 goto <bb 7>;
4782 <bb 6>:
4783 __builtin_unreachable ();
4784 <bb 7>:
4785 x_5 = ASSERT_EXPR <x_3, ...>;
4786 If x_3 has no other immediate uses (checked by caller) and
4787 VAR is the x_3 var from the ASSERT_EXPR, we can clear the low
4788 5 bits from the non-zero bitmask. */
4791 maybe_set_nonzero_bits (edge e, tree var)
4793 basic_block cond_bb = e->src;
4794 gimple *stmt = last_stmt (cond_bb);
4797 if (stmt == NULL
4798 || gimple_code (stmt) != GIMPLE_COND
4799 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
4800 ? EQ_EXPR : NE_EXPR)
4801 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
4802 || !integer_zerop (gimple_cond_rhs (stmt)))
4805 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
4806 if (!is_gimple_assign (stmt)
4807 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
4808 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
4810 if (gimple_assign_rhs1 (stmt) != var)
4814 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
4816 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
4817 if (!gimple_assign_cast_p (stmt2)
4818 || gimple_assign_rhs1 (stmt2) != var
4819 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
4820 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
4821 != TYPE_PRECISION (TREE_TYPE (var))))
4824 cst = gimple_assign_rhs2 (stmt);
4825 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
4826 wi::to_wide (cst)));
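/* Worked example (illustrative masks): if get_nonzero_bits (var) was
   0xff and the taken edge guarantees (var & 31) == 0, the call above
   narrows the mask to 0xff & ~31 == 0xe0.  */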
4829 /* Convert range assertion expressions into the implied copies and
4830 copy propagate away the copies. Doing the trivial copy propagation
4831 here avoids the need to run the full copy propagation pass after
4832 VRP.
4834 FIXME, this will eventually lead to copy propagation removing the
4835 names that had useful range information attached to them. For
4836 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
4837 then N_i will have the range [3, +INF].
4839 However, by converting the assertion into the implied copy
4840 operation N_i = N_j, we will then copy-propagate N_j into the uses
4841 of N_i and lose the range information. We may want to hold on to
4842 ASSERT_EXPRs a little while longer as the ranges could be used in
4843 things like jump threading.
4845 The problem with keeping ASSERT_EXPRs around is that passes after
4846 VRP need to handle them appropriately.
4848 Another approach would be to make the range information a first
4849 class property of the SSA_NAME so that it can be queried from
4850 any pass. This is made somewhat more complex by the need for
4851 multiple ranges to be associated with one SSA_NAME. */
4854 remove_range_assertions (void)
4857 gimple_stmt_iterator si;
4858 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
4859 a basic block preceded by GIMPLE_COND branching to it and
4860 __builtin_trap, -1 if not yet checked, 0 otherwise. */
4863 /* Note that the BSI iterator bump happens at the bottom of the
4864 loop and no bump is necessary if we're removing the statement
4865 referenced by the current BSI. */
4866 FOR_EACH_BB_FN (bb, cfun)
4867 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
4869 gimple *stmt = gsi_stmt (si);
4871 if (is_gimple_assign (stmt)
4872 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
4874 tree lhs = gimple_assign_lhs (stmt);
4875 tree rhs = gimple_assign_rhs1 (stmt);
4878 var = ASSERT_EXPR_VAR (rhs);
4880 if (TREE_CODE (var) == SSA_NAME
4881 && !POINTER_TYPE_P (TREE_TYPE (lhs))
4882 && SSA_NAME_RANGE_INFO (lhs))
4884 if (is_unreachable == -1)
4887 if (single_pred_p (bb)
4888 && assert_unreachable_fallthru_edge_p
4889 (single_pred_edge (bb)))
4892 /* Handle
4893 if (x_7 >= 10 && x_7 < 20)
4894 __builtin_unreachable ();
4895 x_8 = ASSERT_EXPR <x_7, ...>;
4896 if the only uses of x_7 are in the ASSERT_EXPR and
4897 in the condition. In that case, we can copy the
4898 range info from x_8 computed in this pass also
4899 for x_7. */
4900 if (is_unreachable
4901 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
4904 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
4905 SSA_NAME_RANGE_INFO (lhs)->get_min (),
4906 SSA_NAME_RANGE_INFO (lhs)->get_max ());
4907 maybe_set_nonzero_bits (single_pred_edge (bb), var);
4911 /* Propagate the RHS into every use of the LHS. For SSA names
4912 also propagate abnormals as it merely restores the original
4913 IL in this case (a replace_uses_by would assert). */
4914 if (TREE_CODE (var) == SSA_NAME)
4916 imm_use_iterator iter;
4917 use_operand_p use_p;
4919 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4920 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4921 SET_USE (use_p, var);
4924 replace_uses_by (lhs, var);
4926 /* And finally, remove the copy, it is not needed. */
4927 gsi_remove (&si, true);
4928 release_defs (stmt);
4932 if (!is_gimple_debug (gsi_stmt (si)))
4939 /* Return true if STMT is interesting for VRP. */
4942 stmt_interesting_for_vrp (gimple *stmt)
4944 if (gimple_code (stmt) == GIMPLE_PHI)
4946 tree res = gimple_phi_result (stmt);
4947 return (!virtual_operand_p (res)
4948 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
4949 || POINTER_TYPE_P (TREE_TYPE (res))));
4951 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
4953 tree lhs = gimple_get_lhs (stmt);
4955 /* In general, assignments with virtual operands are not useful
4956 for deriving ranges, with the obvious exception of calls to
4957 builtin functions. */
4958 if (lhs && TREE_CODE (lhs) == SSA_NAME
4959 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4960 || POINTER_TYPE_P (TREE_TYPE (lhs)))
4961 && (is_gimple_call (stmt)
4962 || !gimple_vuse (stmt)))
4963 return true;
4964 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4965 switch (gimple_call_internal_fn (stmt))
4967 case IFN_ADD_OVERFLOW:
4968 case IFN_SUB_OVERFLOW:
4969 case IFN_MUL_OVERFLOW:
4970 case IFN_ATOMIC_COMPARE_EXCHANGE:
4971 /* These internal calls return _Complex integer type,
4972 but are interesting to VRP nevertheless. */
4973 if (lhs && TREE_CODE (lhs) == SSA_NAME)
4980 else if (gimple_code (stmt) == GIMPLE_COND
4981 || gimple_code (stmt) == GIMPLE_SWITCH)
4987 /* Initialization required by ssa_propagate engine. */
4990 vrp_prop::vrp_initialize ()
4994 FOR_EACH_BB_FN (bb, cfun)
4996 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
4999 gphi *phi = si.phi ();
5000 if (!stmt_interesting_for_vrp (phi))
5002 tree lhs = PHI_RESULT (phi);
5003 set_value_range_to_varying (get_value_range (lhs));
5004 prop_set_simulate_again (phi, false);
5007 prop_set_simulate_again (phi, true);
5010 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5013 gimple *stmt = gsi_stmt (si);
5015 /* If the statement is a control insn, we must simulate
5016 it at least once; otherwise the edges it controls would
5017 never get added. */
5018 if (stmt_ends_bb_p (stmt))
5019 prop_set_simulate_again (stmt, true);
5020 else if (!stmt_interesting_for_vrp (stmt))
5022 set_defs_to_varying (stmt);
5023 prop_set_simulate_again (stmt, false);
5026 prop_set_simulate_again (stmt, true);
5031 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
5032 that includes the value VAL. The search is restricted to the range
5033 [START_IDX, n - 1] where n is the size of VEC.
5035 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5036 returned.
5038 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5039 it is placed in IDX and false is returned.
5041 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5042 returned. */
5045 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5047 size_t n = gimple_switch_num_labels (stmt);
5050 /* Find case label for minimum of the value range or the next one.
5051 At each iteration we are searching in [low, high - 1]. */
5053 for (low = start_idx, high = n; high != low; )
5057 /* Note that i != high, so we never ask for n. */
5058 size_t i = (high + low) / 2;
5059 t = gimple_switch_label (stmt, i);
5061 /* Cache the result of comparing CASE_LOW and val. */
5062 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5066 /* Ranges cannot be empty. */
5075 if (CASE_HIGH (t) != NULL
5076 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
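/* E.g. (illustrative, simple non-range labels): for a switch with case
   labels 1, 3 and 7 in positions 1 to 3, looking up VAL 3 stores 2 in
   *IDX and returns true; looking up VAL 5 stores 3 (the position of
   label 7, the next larger label) and returns false; looking up VAL 9
   stores n and returns false.  */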
5088 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5089 for values between MIN and MAX. The first index is placed in MIN_IDX. The
5090 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5091 then MAX_IDX < MIN_IDX.
5092 Returns true if the default label is not needed. */
5095 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5099 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5100 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5104 && max_take_default)
5106 /* Only the default case label reached.
5107 Return an empty range. */
5114 bool take_default = min_take_default || max_take_default;
5118 if (max_take_default)
5119 j--;
5121 /* If the case label range is continuous, we do not need
5122 the default case label. Verify that. */
5123 high = CASE_LOW (gimple_switch_label (stmt, i));
5124 if (CASE_HIGH (gimple_switch_label (stmt, i)))
5125 high = CASE_HIGH (gimple_switch_label (stmt, i));
5126 for (k = i + 1; k <= j; ++k)
5128 low = CASE_LOW (gimple_switch_label (stmt, k));
5129 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5131 take_default = true;
5135 if (CASE_HIGH (gimple_switch_label (stmt, k)))
5136 high = CASE_HIGH (gimple_switch_label (stmt, k));
5141 return !take_default;
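/* E.g. (illustrative): for switch (x) with case labels 0, 1 and 2 and
   MIN/MAX of [0, 2], the label range is contiguous and covers the whole
   value range, so true is returned: the default label cannot be taken.
   With labels 0 and 2 only, the hole at 1 forces TAKE_DEFAULT and the
   function returns false.  */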
5145 /* Evaluate statement STMT. If the statement produces a useful range,
5146 return SSA_PROP_INTERESTING and record the SSA name with the
5147 interesting range into *OUTPUT_P.
5149 If STMT is a conditional branch and we can determine its truth
5150 value, the taken edge is recorded in *TAKEN_EDGE_P.
5152 If STMT produces a varying value, return SSA_PROP_VARYING. */
5154 enum ssa_prop_result
5155 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5157 value_range vr = VR_INITIALIZER;
5158 tree lhs = gimple_get_lhs (stmt);
5159 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5163 if (update_value_range (*output_p, &vr))
5165 if (dump_file && (dump_flags & TDF_DETAILS))
5167 fprintf (dump_file, "Found new range for ");
5168 print_generic_expr (dump_file, *output_p);
5169 fprintf (dump_file, ": ");
5170 dump_value_range (dump_file, &vr);
5171 fprintf (dump_file, "\n");
5174 if (vr.type == VR_VARYING)
5175 return SSA_PROP_VARYING;
5177 return SSA_PROP_INTERESTING;
5179 return SSA_PROP_NOT_INTERESTING;
5182 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5183 switch (gimple_call_internal_fn (stmt))
5185 case IFN_ADD_OVERFLOW:
5186 case IFN_SUB_OVERFLOW:
5187 case IFN_MUL_OVERFLOW:
5188 case IFN_ATOMIC_COMPARE_EXCHANGE:
5189 /* These internal calls return _Complex integer type,
5190 which VRP does not track, but the immediate uses
5191 thereof might be interesting. */
5192 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5194 imm_use_iterator iter;
5195 use_operand_p use_p;
5196 enum ssa_prop_result res = SSA_PROP_VARYING;
5198 set_value_range_to_varying (get_value_range (lhs));
5200 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5202 gimple *use_stmt = USE_STMT (use_p);
5203 if (!is_gimple_assign (use_stmt))
5205 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5206 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5208 tree rhs1 = gimple_assign_rhs1 (use_stmt);
5209 tree use_lhs = gimple_assign_lhs (use_stmt);
5210 if (TREE_CODE (rhs1) != rhs_code
5211 || TREE_OPERAND (rhs1, 0) != lhs
5212 || TREE_CODE (use_lhs) != SSA_NAME
5213 || !stmt_interesting_for_vrp (use_stmt)
5214 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5215 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5216 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5219 /* If there is a change in the value range for any of the
5220 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5221 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
5222 or IMAGPART_EXPR immediate uses, but none of them have
5223 a change in their value ranges, return
5224 SSA_PROP_NOT_INTERESTING. If there are no
5225 {REAL,IMAG}PART_EXPR uses at all,
5226 return SSA_PROP_VARYING. */
5227 value_range new_vr = VR_INITIALIZER;
5228 extract_range_basic (&new_vr, use_stmt);
5229 const value_range *old_vr = get_value_range (use_lhs);
5230 if (old_vr->type != new_vr.type
5231 || !vrp_operand_equal_p (old_vr->min, new_vr.min)
5232 || !vrp_operand_equal_p (old_vr->max, new_vr.max)
5233 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
5234 res = SSA_PROP_INTERESTING;
5236 res = SSA_PROP_NOT_INTERESTING;
5237 BITMAP_FREE (new_vr.equiv);
5238 if (res == SSA_PROP_INTERESTING)
5252 /* All other statements produce nothing of interest for VRP, so mark
5253 their outputs varying and prevent further simulation. */
5254 set_defs_to_varying (stmt);
5256 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5259 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5260 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5261 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5262 possible such range. The resulting range is not canonicalized. */
5265 union_ranges (enum value_range_type *vr0type,
5266 tree *vr0min, tree *vr0max,
5267 enum value_range_type vr1type,
5268 tree vr1min, tree vr1max)
5270 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5271 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5273 /* [] is vr0, () is vr1 in the following classification comments. */
5277 if (*vr0type == vr1type)
5278 /* Nothing to do for equal ranges. */
5280 else if ((*vr0type == VR_RANGE
5281 && vr1type == VR_ANTI_RANGE)
5282 || (*vr0type == VR_ANTI_RANGE
5283 && vr1type == VR_RANGE))
5285 /* For anti-range with range union the result is varying. */
5291 else if (operand_less_p (*vr0max, vr1min) == 1
5292 || operand_less_p (vr1max, *vr0min) == 1)
5294 /* [ ] ( ) or ( ) [ ]
5295 If the ranges have an empty intersection, the result of the union
5296 operation is the anti-range, or, if both are anti-ranges,
5297 the result is varying. */
5298 if (*vr0type == VR_ANTI_RANGE
5299 && vr1type == VR_ANTI_RANGE)
5301 else if (*vr0type == VR_ANTI_RANGE
5302 && vr1type == VR_RANGE)
5304 else if (*vr0type == VR_RANGE
5305 && vr1type == VR_ANTI_RANGE)
5311 else if (*vr0type == VR_RANGE
5312 && vr1type == VR_RANGE)
5314 /* The result is the convex hull of both ranges. */
5315 if (operand_less_p (*vr0max, vr1min) == 1)
5317 /* If the result can be an anti-range, create one. */
5318 if (TREE_CODE (*vr0max) == INTEGER_CST
5319 && TREE_CODE (vr1min) == INTEGER_CST
5320 && vrp_val_is_min (*vr0min)
5321 && vrp_val_is_max (vr1max))
5323 tree min = int_const_binop (PLUS_EXPR,
5325 build_int_cst (TREE_TYPE (*vr0max), 1));
5326 tree max = int_const_binop (MINUS_EXPR,
5328 build_int_cst (TREE_TYPE (vr1min), 1));
5329 if (!operand_less_p (max, min))
5331 *vr0type = VR_ANTI_RANGE;
5343 /* If the result can be an anti-range, create one. */
5344 if (TREE_CODE (vr1max) == INTEGER_CST
5345 && TREE_CODE (*vr0min) == INTEGER_CST
5346 && vrp_val_is_min (vr1min)
5347 && vrp_val_is_max (*vr0max))
5349 tree min = int_const_binop (PLUS_EXPR,
5351 build_int_cst (TREE_TYPE (vr1max), 1));
5352 tree max = int_const_binop (MINUS_EXPR,
5354 build_int_cst (TREE_TYPE (*vr0min), 1));
5355 if (!operand_less_p (max, min))
5357 *vr0type = VR_ANTI_RANGE;
5371 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5372 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5374 /* [ ( ) ] or [( ) ] or [ ( )] */
5375 if (*vr0type == VR_RANGE
5376 && vr1type == VR_RANGE)
5378 else if (*vr0type == VR_ANTI_RANGE
5379 && vr1type == VR_ANTI_RANGE)
5385 else if (*vr0type == VR_ANTI_RANGE
5386 && vr1type == VR_RANGE)
5388 /* Arbitrarily choose the right or left gap. */
5389 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5390 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5391 build_int_cst (TREE_TYPE (vr1min), 1));
5392 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5393 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5394 build_int_cst (TREE_TYPE (vr1max), 1));
5398 else if (*vr0type == VR_RANGE
5399 && vr1type == VR_ANTI_RANGE)
5400 /* The result covers everything. */
5405 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5406 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5408 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5409 if (*vr0type == VR_RANGE
5410 && vr1type == VR_RANGE)
5416 else if (*vr0type == VR_ANTI_RANGE
5417 && vr1type == VR_ANTI_RANGE)
5419 else if (*vr0type == VR_RANGE
5420 && vr1type == VR_ANTI_RANGE)
5422 *vr0type = VR_ANTI_RANGE;
5423 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5425 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5426 build_int_cst (TREE_TYPE (*vr0min), 1));
5429 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5431 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5432 build_int_cst (TREE_TYPE (*vr0max), 1));
5438 else if (*vr0type == VR_ANTI_RANGE
5439 && vr1type == VR_RANGE)
5440 /* The result covers everything. */
5445 else if ((operand_less_p (vr1min, *vr0max) == 1
5446 || operand_equal_p (vr1min, *vr0max, 0))
5447 && operand_less_p (*vr0min, vr1min) == 1
5448 && operand_less_p (*vr0max, vr1max) == 1)
5450 /* [ ( ] ) or [ ]( ) */
5451 if (*vr0type == VR_RANGE
5452 && vr1type == VR_RANGE)
5454 else if (*vr0type == VR_ANTI_RANGE
5455 && vr1type == VR_ANTI_RANGE)
5457 else if (*vr0type == VR_ANTI_RANGE
5458 && vr1type == VR_RANGE)
5460 if (TREE_CODE (vr1min) == INTEGER_CST)
5461 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5462 build_int_cst (TREE_TYPE (vr1min), 1));
5466 else if (*vr0type == VR_RANGE
5467 && vr1type == VR_ANTI_RANGE)
5469 if (TREE_CODE (*vr0max) == INTEGER_CST)
5472 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5473 build_int_cst (TREE_TYPE (*vr0max), 1));
5482 else if ((operand_less_p (*vr0min, vr1max) == 1
5483 || operand_equal_p (*vr0min, vr1max, 0))
5484 && operand_less_p (vr1min, *vr0min) == 1
5485 && operand_less_p (vr1max, *vr0max) == 1)
5487 /* ( [ ) ] or ( )[ ] */
5488 if (*vr0type == VR_RANGE
5489 && vr1type == VR_RANGE)
5491 else if (*vr0type == VR_ANTI_RANGE
5492 && vr1type == VR_ANTI_RANGE)
5494 else if (*vr0type == VR_ANTI_RANGE
5495 && vr1type == VR_RANGE)
5497 if (TREE_CODE (vr1max) == INTEGER_CST)
5498 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5499 build_int_cst (TREE_TYPE (vr1max), 1));
5503 else if (*vr0type == VR_RANGE
5504 && vr1type == VR_ANTI_RANGE)
5506 if (TREE_CODE (*vr0min) == INTEGER_CST)
5509 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5510 build_int_cst (TREE_TYPE (*vr0min), 1));
5525 *vr0type = VR_VARYING;
5526 *vr0min = NULL_TREE;
5527 *vr0max = NULL_TREE;
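/* Illustrative examples of the cases above (INF denotes the type
   extremes):

     [1, 5]  union  [3, 10]       ->  [1, 10]   convex hull
     [-INF, 2]  union  [7, +INF]  ->  ~[3, 6]   the gap becomes an
                                                anti-range
     ~[0, 0]  union  [0, 0]       ->  VARYING   anti-range with range  */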
5530 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5531 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5532 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5533 possible such range. The resulting range is not canonicalized. */
5536 intersect_ranges (enum value_range_type *vr0type,
5537 tree *vr0min, tree *vr0max,
5538 enum value_range_type vr1type,
5539 tree vr1min, tree vr1max)
5541 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5542 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5544 /* [] is vr0, () is vr1 in the following classification comments. */
5548 if (*vr0type == vr1type)
5549 /* Nothing to do for equal ranges. */
5551 else if ((*vr0type == VR_RANGE
5552 && vr1type == VR_ANTI_RANGE)
5553 || (*vr0type == VR_ANTI_RANGE
5554 && vr1type == VR_RANGE))
5556 /* For anti-range with range intersection the result is empty. */
5557 *vr0type = VR_UNDEFINED;
5558 *vr0min = NULL_TREE;
5559 *vr0max = NULL_TREE;
5564 else if (operand_less_p (*vr0max, vr1min) == 1
5565 || operand_less_p (vr1max, *vr0min) == 1)
5567 /* [ ] ( ) or ( ) [ ]
5568 If the ranges have an empty intersection, the result of the
5569 intersect operation is the range for intersecting an
5570 anti-range with a range or empty when intersecting two ranges. */
5571 if (*vr0type == VR_RANGE
5572 && vr1type == VR_ANTI_RANGE)
5574 else if (*vr0type == VR_ANTI_RANGE
5575 && vr1type == VR_RANGE)
5581 else if (*vr0type == VR_RANGE
5582 && vr1type == VR_RANGE)
5584 *vr0type = VR_UNDEFINED;
5585 *vr0min = NULL_TREE;
5586 *vr0max = NULL_TREE;
5588 else if (*vr0type == VR_ANTI_RANGE
5589 && vr1type == VR_ANTI_RANGE)
5591 /* If the anti-ranges are adjacent to each other merge them. */
5592 if (TREE_CODE (*vr0max) == INTEGER_CST
5593 && TREE_CODE (vr1min) == INTEGER_CST
5594 && operand_less_p (*vr0max, vr1min) == 1
5595 && integer_onep (int_const_binop (MINUS_EXPR,
5598 else if (TREE_CODE (vr1max) == INTEGER_CST
5599 && TREE_CODE (*vr0min) == INTEGER_CST
5600 && operand_less_p (vr1max, *vr0min) == 1
5601 && integer_onep (int_const_binop (MINUS_EXPR,
5604 /* Else arbitrarily take VR0. */
5607 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5608 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5610 /* [ ( ) ] or [( ) ] or [ ( )] */
5611 if (*vr0type == VR_RANGE
5612 && vr1type == VR_RANGE)
5614 /* If both are ranges the result is the inner one. */
5619 else if (*vr0type == VR_RANGE
5620 && vr1type == VR_ANTI_RANGE)
5622 /* Choose the right gap if the left one is empty. */
5625 if (TREE_CODE (vr1max) != INTEGER_CST)
5627 else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
5628 && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
5630 = int_const_binop (MINUS_EXPR, vr1max,
5631 build_int_cst (TREE_TYPE (vr1max), -1));
5634 = int_const_binop (PLUS_EXPR, vr1max,
5635 build_int_cst (TREE_TYPE (vr1max), 1));
5637 /* Choose the left gap if the right one is empty. */
5640 if (TREE_CODE (vr1min) != INTEGER_CST)
5642 else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
5643 && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
5645 = int_const_binop (PLUS_EXPR, vr1min,
5646 build_int_cst (TREE_TYPE (vr1min), -1));
5649 = int_const_binop (MINUS_EXPR, vr1min,
5650 build_int_cst (TREE_TYPE (vr1min), 1));
5652 /* Choose the anti-range if the range is effectively varying. */
5653 else if (vrp_val_is_min (*vr0min)
5654 && vrp_val_is_max (*vr0max))
5660 /* Else choose the range. */
5662 else if (*vr0type == VR_ANTI_RANGE
5663 && vr1type == VR_ANTI_RANGE)
5664 /* If both are anti-ranges the result is the outer one. */
5666 else if (*vr0type == VR_ANTI_RANGE
5667 && vr1type == VR_RANGE)
5669 /* The intersection is empty. */
5670 *vr0type = VR_UNDEFINED;
5671 *vr0min = NULL_TREE;
5672 *vr0max = NULL_TREE;
5677 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5678 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5680 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5681 if (*vr0type == VR_RANGE
5682 && vr1type == VR_RANGE)
5683 /* Choose the inner range. */
5685 else if (*vr0type == VR_ANTI_RANGE
5686 && vr1type == VR_RANGE)
5688 /* Choose the right gap if the left is empty. */
5691 *vr0type = VR_RANGE;
5692 if (TREE_CODE (*vr0max) != INTEGER_CST)
5694 else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
5695 && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
5697 = int_const_binop (MINUS_EXPR, *vr0max,
5698 build_int_cst (TREE_TYPE (*vr0max), -1));
5701 = int_const_binop (PLUS_EXPR, *vr0max,
5702 build_int_cst (TREE_TYPE (*vr0max), 1));
5705 /* Choose the left gap if the right is empty. */
5708 *vr0type = VR_RANGE;
5709 if (TREE_CODE (*vr0min) != INTEGER_CST)
5711 else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
5712 && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
5714 = int_const_binop (PLUS_EXPR, *vr0min,
5715 build_int_cst (TREE_TYPE (*vr0min), -1));
5718 = int_const_binop (MINUS_EXPR, *vr0min,
5719 build_int_cst (TREE_TYPE (*vr0min), 1));
5722 /* Choose the anti-range if the range is effectively varying. */
5723 else if (vrp_val_is_min (vr1min)
5724 && vrp_val_is_max (vr1max))
5726 /* Choose the anti-range if it is ~[0,0], that range is special
5727 enough to special case when vr1's range is relatively wide.
5728 At least for types bigger than int - this covers pointers
5729 and arguments to functions like ctz. */
5730 else if (*vr0min == *vr0max
5731 && integer_zerop (*vr0min)
5732 && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
5733 >= TYPE_PRECISION (integer_type_node))
5734 || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
5735 && TREE_CODE (vr1max) == INTEGER_CST
5736 && TREE_CODE (vr1min) == INTEGER_CST
5737 && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
5738 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
5740 /* Else choose the range. */
5748 else if (*vr0type == VR_ANTI_RANGE
5749 && vr1type == VR_ANTI_RANGE)
5751 /* If both are anti-ranges the result is the outer one. */
5756 else if (vr1type == VR_ANTI_RANGE
5757 && *vr0type == VR_RANGE)
5759 /* The intersection is empty. */
5760 *vr0type = VR_UNDEFINED;
5761 *vr0min = NULL_TREE;
5762 *vr0max = NULL_TREE;
5767 else if ((operand_less_p (vr1min, *vr0max) == 1
5768 || operand_equal_p (vr1min, *vr0max, 0))
5769 && operand_less_p (*vr0min, vr1min) == 1)
5771 /* [ ( ] ) or [ ]( ) */
5772 if (*vr0type == VR_ANTI_RANGE
5773 && vr1type == VR_ANTI_RANGE)
5775 else if (*vr0type == VR_RANGE
5776 && vr1type == VR_RANGE)
5778 else if (*vr0type == VR_RANGE
5779 && vr1type == VR_ANTI_RANGE)
5781 if (TREE_CODE (vr1min) == INTEGER_CST)
5782 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5783 build_int_cst (TREE_TYPE (vr1min), 1));
5787 else if (*vr0type == VR_ANTI_RANGE
5788 && vr1type == VR_RANGE)
5790 *vr0type = VR_RANGE;
5791 if (TREE_CODE (*vr0max) == INTEGER_CST)
5792 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5793 build_int_cst (TREE_TYPE (*vr0max), 1));
5801 else if ((operand_less_p (*vr0min, vr1max) == 1
5802 || operand_equal_p (*vr0min, vr1max, 0))
5803 && operand_less_p (vr1min, *vr0min) == 1)
5805 /* ( [ ) ] or ( )[ ] */
5806 if (*vr0type == VR_ANTI_RANGE
5807 && vr1type == VR_ANTI_RANGE)
5809 else if (*vr0type == VR_RANGE
5810 && vr1type == VR_RANGE)
5812 else if (*vr0type == VR_RANGE
5813 && vr1type == VR_ANTI_RANGE)
5815 if (TREE_CODE (vr1max) == INTEGER_CST)
5816 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5817 build_int_cst (TREE_TYPE (vr1max), 1));
5821 else if (*vr0type == VR_ANTI_RANGE
5822 && vr1type == VR_RANGE)
5824 *vr0type = VR_RANGE;
5825 if (TREE_CODE (*vr0min) == INTEGER_CST)
5826 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5827 build_int_cst (TREE_TYPE (*vr0min), 1));
5836 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
5837 result for the intersection. That's always a conservative
5838 correct estimate unless VR1 is a constant singleton range
5839 in which case we choose that. */
5840 if (vr1type == VR_RANGE
5841 && is_gimple_min_invariant (vr1min)
5842 && vrp_operand_equal_p (vr1min, vr1max))
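/* Illustrative examples of the cases above:

     [1, 10]  intersect  [5, 20]       ->  [5, 10]
     [3, 10]  intersect  ~[3, 5]       ->  [6, 10]   left gap is empty
     [-INF, +INF]  intersect  ~[0, 0]  ->  ~[0, 0]
     [0, 10]  intersect  ~[3, 5]       ->  [0, 10]   kept conservatively,
                                                     exact result is not
                                                     representable  */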
5853 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
5854 in *VR0. This may not be the smallest possible such range. */
5857 vrp_intersect_ranges_1 (value_range *vr0, const value_range *vr1)
5861 /* If either range is VR_VARYING the other one wins. */
5862 if (vr1->type == VR_VARYING)
5864 if (vr0->type == VR_VARYING)
5866 copy_value_range (vr0, vr1);
5870 /* When either range is VR_UNDEFINED the resulting range is
5871 VR_UNDEFINED, too. */
5872 if (vr0->type == VR_UNDEFINED)
5874 if (vr1->type == VR_UNDEFINED)
5876 set_value_range_to_undefined (vr0);
5880 /* Save the original vr0 so we can return it as conservative intersection
5881 result when our worker turns things to varying. */
5883 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
5884 vr1->type, vr1->min, vr1->max);
5885 /* Make sure to canonicalize the result though as the inversion of a
5886 VR_RANGE can still be a VR_RANGE. */
5887 set_and_canonicalize_value_range (vr0, vr0->type,
5888 vr0->min, vr0->max, vr0->equiv);
5889 /* If that failed, use the saved original VR0. */
5890 if (vr0->type == VR_VARYING)
5895 /* If the result is VR_UNDEFINED there is no need to mess with
5896 the equivalencies. */
5897 if (vr0->type == VR_UNDEFINED)
5900 /* The resulting set of equivalences for range intersection is the union of
5901 the two sets. */
5902 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
5903 bitmap_ior_into (vr0->equiv, vr1->equiv);
5904 else if (vr1->equiv && !vr0->equiv)
5906 /* All equivalence bitmaps are allocated from the same obstack. So
5907 we can use the obstack associated with VR to allocate vr0->equiv. */
5908 vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
5909 bitmap_copy (vr0->equiv, vr1->equiv);
5914 vrp_intersect_ranges (value_range *vr0, const value_range *vr1)
5916 if (dump_file && (dump_flags & TDF_DETAILS))
5918 fprintf (dump_file, "Intersecting\n ");
5919 dump_value_range (dump_file, vr0);
5920 fprintf (dump_file, "\nand\n ");
5921 dump_value_range (dump_file, vr1);
5922 fprintf (dump_file, "\n");
5924 vrp_intersect_ranges_1 (vr0, vr1);
5925 if (dump_file && (dump_flags & TDF_DETAILS))
5927 fprintf (dump_file, "to\n ");
5928 dump_value_range (dump_file, vr0);
5929 fprintf (dump_file, "\n");
5933 /* Meet operation for value ranges. Given two value ranges VR0 and
5934 VR1, store in VR0 a range that contains both VR0 and VR1. This
5935 may not be the smallest possible such range. */
5938 vrp_meet_1 (value_range *vr0, const value_range *vr1)
5942 if (vr0->type == VR_UNDEFINED)
5944 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
5948 if (vr1->type == VR_UNDEFINED)
5950 /* VR0 already has the resulting range. */
5954 if (vr0->type == VR_VARYING)
5956 /* Nothing to do. VR0 already has the resulting range. */
5960 if (vr1->type == VR_VARYING)
5962 set_value_range_to_varying (vr0);
5967 union_ranges (&vr0->type, &vr0->min, &vr0->max,
5968 vr1->type, vr1->min, vr1->max);
5969 if (vr0->type == VR_VARYING)
5971 /* Failed to find an efficient meet. Before giving up and setting
5972 the result to VARYING, see if we can at least derive a useful
5973 anti-range. */
5974 if (range_includes_zero_p (&saved) == 0
5975 && range_includes_zero_p (vr1) == 0)
5977 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
5979 /* Since this meet operation did not result from the meeting of
5980 two equivalent names, VR0 cannot have any equivalences. */
5982 bitmap_clear (vr0->equiv);
5986 set_value_range_to_varying (vr0);
5989 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
5990 vr0->equiv);
5991 if (vr0->type == VR_VARYING)
5994 /* The resulting set of equivalences is always the intersection of
5995 the two sets. */
5996 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
5997 bitmap_and_into (vr0->equiv, vr1->equiv);
5998 else if (vr0->equiv && !vr1->equiv)
5999 bitmap_clear (vr0->equiv);
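/* E.g. (illustrative): meeting [1, 5] with [8, 10] yields the convex
   hull [1, 10]; meeting ~[0, 0] with [1, 4] keeps ~[0, 0], which already
   contains [1, 4].  When the union is forced to VARYING but both inputs
   exclude zero, the fallback above still derives the nonnull range
   ~[0, 0].  */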
6003 vrp_meet (value_range *vr0, const value_range *vr1)
6005 if (dump_file && (dump_flags & TDF_DETAILS))
6007 fprintf (dump_file, "Meeting\n ");
6008 dump_value_range (dump_file, vr0);
6009 fprintf (dump_file, "\nand\n ");
6010 dump_value_range (dump_file, vr1);
6011 fprintf (dump_file, "\n");
6013 vrp_meet_1 (vr0, vr1);
6014 if (dump_file && (dump_flags & TDF_DETAILS))
6016 fprintf (dump_file, "to\n ");
6017 dump_value_range (dump_file, vr0);
6018 fprintf (dump_file, "\n");
6023 /* Visit all arguments for PHI node PHI that flow through executable
6024 edges. If a valid value range can be derived from all the incoming
6025 value ranges, set a new range for the LHS of PHI. */
6027 enum ssa_prop_result
6028 vrp_prop::visit_phi (gphi *phi)
6030 tree lhs = PHI_RESULT (phi);
6031 value_range vr_result = VR_INITIALIZER;
6032 extract_range_from_phi_node (phi, &vr_result);
6033 if (update_value_range (lhs, &vr_result))
6035 if (dump_file && (dump_flags & TDF_DETAILS))
6037 fprintf (dump_file, "Found new range for ");
6038 print_generic_expr (dump_file, lhs);
6039 fprintf (dump_file, ": ");
6040 dump_value_range (dump_file, &vr_result);
6041 fprintf (dump_file, "\n");
6044 if (vr_result.type == VR_VARYING)
6045 return SSA_PROP_VARYING;
6047 return SSA_PROP_INTERESTING;
6050 /* Nothing changed, don't add outgoing edges. */
6051 return SSA_PROP_NOT_INTERESTING;
6054 class vrp_folder : public substitute_and_fold_engine
6057 tree get_value (tree) FINAL OVERRIDE;
6058 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
6059 bool fold_predicate_in (gimple_stmt_iterator *);
6061 class vr_values *vr_values;
6064 tree vrp_evaluate_conditional (tree_code code, tree op0,
6065 tree op1, gimple *stmt)
6066 { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
6067 bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6068 { return vr_values->simplify_stmt_using_ranges (gsi); }
6069 tree op_with_constant_singleton_value_range (tree op)
6070 { return vr_values->op_with_constant_singleton_value_range (op); }
6073 /* If the statement pointed to by SI has a predicate whose value can be
6074 computed using the value range information computed by VRP, compute
6075 its value and return true. Otherwise, return false. */
6078 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
6080 bool assignment_p = false;
6082 gimple *stmt = gsi_stmt (*si);
6084 if (is_gimple_assign (stmt)
6085 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
6087 assignment_p = true;
6088 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
6089 gimple_assign_rhs1 (stmt),
6090 gimple_assign_rhs2 (stmt),
6093 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6094 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6095 gimple_cond_lhs (cond_stmt),
6096 gimple_cond_rhs (cond_stmt),
6104 val = fold_convert (gimple_expr_type (stmt), val);
6108 fprintf (dump_file, "Folding predicate ");
6109 print_gimple_expr (dump_file, stmt, 0);
6110 fprintf (dump_file, " to ");
6111 print_generic_expr (dump_file, val);
6112 fprintf (dump_file, "\n");
6115 if (is_gimple_assign (stmt))
6116 gimple_assign_set_rhs_from_tree (si, val);
6119 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
6120 gcond *cond_stmt = as_a <gcond *> (stmt);
6121 if (integer_zerop (val))
6122 gimple_cond_make_false (cond_stmt);
6123 else if (integer_onep (val))
6124 gimple_cond_make_true (cond_stmt);
6135 /* Callback for substitute_and_fold folding the stmt at *SI. */
6138 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6140 if (fold_predicate_in (si))
6141 return true;
6143 return simplify_stmt_using_ranges (si);
6146 /* If OP has a value range with a single constant value return that,
6147 otherwise return NULL_TREE. This returns OP itself if OP is a
6148 constant.
6150 Implemented as a pure wrapper right now, but this will change. */
6153 vrp_folder::get_value (tree op)
6155 return op_with_constant_singleton_value_range (op);
6158 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
6159 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
6160 BB. If no such ASSERT_EXPR is found, return OP. */
6163 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
6165 imm_use_iterator imm_iter;
6167 use_operand_p use_p;
6169 if (TREE_CODE (op) == SSA_NAME)
6171 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
6173 use_stmt = USE_STMT (use_p);
6174 if (use_stmt != stmt
6175 && gimple_assign_single_p (use_stmt)
6176 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
6177 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
6178 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
6179 return gimple_assign_lhs (use_stmt);
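/* E.g. (illustrative SSA names): if a block dominating BB contains

     x_2 = ASSERT_EXPR <x_1, x_1 > 0>;

   then a query for x_1 returns x_2, whose narrower range ([1, +INF]
   for a signed int) can be used while threading through BB.  */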
6186 static class vr_values *x_vr_values;
6188 /* A trivial wrapper so that we can present the generic jump threading
6189 code with a simple API for simplifying statements. STMT is the
6190 statement we want to simplify, WITHIN_STMT provides the location
6191 for any overflow warnings. */
6194 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6195 class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED,
6198 /* First see if the conditional is in the hash table. */
6199 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6200 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6201 return cached_lhs;
6203 vr_values *vr_values = x_vr_values;
6204 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6206 tree op0 = gimple_cond_lhs (cond_stmt);
6207 op0 = lhs_of_dominating_assert (op0, bb, stmt);
6209 tree op1 = gimple_cond_rhs (cond_stmt);
6210 op1 = lhs_of_dominating_assert (op1, bb, stmt);
6212 return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6213 op0, op1, within_stmt);
6216 /* We simplify a switch statement by trying to determine which case label
6217 will be taken. If we are successful then we return the corresponding
6218 CASE_LABEL_EXPR. */
6219 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6221 tree op = gimple_switch_index (switch_stmt);
6222 if (TREE_CODE (op) != SSA_NAME)
6225 op = lhs_of_dominating_assert (op, bb, stmt);
6227 const value_range *vr = vr_values->get_value_range (op);
6228 if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
6229 || symbolic_range_p (vr))
6232 if (vr->type == VR_RANGE)
6235 /* Get the range of labels that contain a part of the operand's
6236 value range. */
6237 find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);
6239 /* Is there only one such label? */
6242 tree label = gimple_switch_label (switch_stmt, i);
6244 /* The i'th label will be taken only if the value range of the
6245 operand is entirely within the bounds of this label. */
6246 if (CASE_HIGH (label) != NULL_TREE
6247 ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
6248 && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
6249 : (tree_int_cst_equal (CASE_LOW (label), vr->min)
6250 && tree_int_cst_equal (vr->min, vr->max)))
6254 /* If there are no such labels then the default label will be
6255 taken. */
6256 if (i > j)
6257 return gimple_switch_label (switch_stmt, 0);
6260 if (vr->type == VR_ANTI_RANGE)
6262 unsigned n = gimple_switch_num_labels (switch_stmt);
6263 tree min_label = gimple_switch_label (switch_stmt, 1);
6264 tree max_label = gimple_switch_label (switch_stmt, n - 1);
6266 /* The default label will be taken only if the anti-range of the
6267 operand is entirely outside the bounds of all the (non-default)
6268 case labels. */
6269 if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
6270 && (CASE_HIGH (max_label) != NULL_TREE
6271 ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
6272 : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
6273 return gimple_switch_label (switch_stmt, 0);
6279 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6281 tree lhs = gimple_assign_lhs (assign_stmt);
6282 if (TREE_CODE (lhs) == SSA_NAME
6283 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6284 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6285 && stmt_interesting_for_vrp (stmt))
6289 value_range new_vr = VR_INITIALIZER;
6290 vr_values->extract_range_from_stmt (stmt, &dummy_e,
6291 &dummy_tree, &new_vr);
6292 if (range_int_cst_singleton_p (&new_vr))
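/* E.g. (illustrative): when VRP knows i_1 is in [1, 3], a GIMPLE_COND
   "if (i_1 > 0)" simplifies to true, and a switch on i_1 whose labels
   for 1..3 all branch to one case simplifies to that label, letting
   the threader bypass the runtime test.  */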
6300 class vrp_dom_walker : public dom_walker
6303 vrp_dom_walker (cdi_direction direction,
6304 class const_and_copies *const_and_copies,
6305 class avail_exprs_stack *avail_exprs_stack)
6306 : dom_walker (direction, REACHABLE_BLOCKS),
6307 m_const_and_copies (const_and_copies),
6308 m_avail_exprs_stack (avail_exprs_stack),
6309 m_dummy_cond (NULL) {}
6311 virtual edge before_dom_children (basic_block);
6312 virtual void after_dom_children (basic_block);
6314 class vr_values *vr_values;
6317 class const_and_copies *m_const_and_copies;
6318 class avail_exprs_stack *m_avail_exprs_stack;
6320 gcond *m_dummy_cond;
6324 /* Called before processing dominator children of BB. We want to look
6325 at ASSERT_EXPRs and record information from them in the appropriate
6328 We could look at other statements here. It's not seen as likely
6329 to significantly increase the jump threads we discover. */
6332 vrp_dom_walker::before_dom_children (basic_block bb)
6334 gimple_stmt_iterator gsi;
6336 m_avail_exprs_stack->push_marker ();
6337 m_const_and_copies->push_marker ();
6338 for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6340 gimple *stmt = gsi_stmt (gsi);
6341 if (gimple_assign_single_p (stmt)
6342 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
6344 tree rhs1 = gimple_assign_rhs1 (stmt);
6345 tree cond = TREE_OPERAND (rhs1, 1);
6346 tree inverted = invert_truthvalue (cond);
6347 vec<cond_equivalence> p;
6349 record_conditions (&p, cond, inverted);
6350 for (unsigned int i = 0; i < p.length (); i++)
6351 m_avail_exprs_stack->record_cond (&p[i]);
6353 tree lhs = gimple_assign_lhs (stmt);
6354 m_const_and_copies->record_const_or_copy (lhs,
6355 TREE_OPERAND (rhs1, 0));
6364 /* Called after processing dominator children of BB. This is where we
6365 actually call into the threader. */
6367 vrp_dom_walker::after_dom_children (basic_block bb)
6370 m_dummy_cond = gimple_build_cond (NE_EXPR,
6371 integer_zero_node, integer_zero_node,
6374 x_vr_values = vr_values;
6375 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
6376 m_avail_exprs_stack, NULL,
6377 simplify_stmt_for_jump_threading);
6380 m_avail_exprs_stack->pop_to_marker ();
6381 m_const_and_copies->pop_to_marker ();
6384 /* Blocks which have more than one predecessor and more than
6385 one successor present jump threading opportunities, i.e.,
6386 when the block is reached from a specific predecessor, we
6387 may be able to determine which of the outgoing edges will
6388 be traversed. When this optimization applies, we are able
6389 to avoid conditionals at runtime and we may expose secondary
6390 optimization opportunities.
6392 This routine is effectively a driver for the generic jump
6393 threading code. It basically just presents the generic code
6394 with edges that may be suitable for jump threading.
6396 Unlike DOM, we do not iterate VRP if jump threading was successful.
6397 While iterating may expose new opportunities for VRP, it is expected
6398 those opportunities would be very limited and the compile time cost
6399 to expose those opportunities would be significant.
6401 As jump threading opportunities are discovered, they are registered
6402 for later realization. */
6405 identify_jump_threads (class vr_values *vr_values)
6410 /* Ugh. When substituting values earlier in this pass we can
6411 wipe the dominance information. So rebuild the dominator
6412 information as we need it within the jump threading code. */
6413 calculate_dominance_info (CDI_DOMINATORS);
6415 /* We do not allow VRP information to be used for jump threading
6416 across a back edge in the CFG. Otherwise it becomes too
6417 difficult to avoid eliminating loop exit tests. Of course
6418 EDGE_DFS_BACK is not accurate at this time so we have to
6419 recompute it. */
6420 mark_dfs_back_edges ();
6422 /* Do not thread across edges we are about to remove. Just marking
6423 them as EDGE_IGNORE will do. */
6424 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6425 e->flags |= EDGE_IGNORE;
6427 /* Allocate our unwinder stack to unwind any temporary equivalences
6428 that might be recorded. */
6429 const_and_copies *equiv_stack = new const_and_copies ();
6431 hash_table<expr_elt_hasher> *avail_exprs
6432 = new hash_table<expr_elt_hasher> (1024);
6433 avail_exprs_stack *avail_exprs_stack
6434 = new class avail_exprs_stack (avail_exprs);
6436 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
6437 walker.vr_values = vr_values;
6438 walker.walk (cfun->cfg->x_entry_block_ptr);
6440 /* Clear EDGE_IGNORE. */
6441 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6442 e->flags &= ~EDGE_IGNORE;
6444 /* We do not actually update the CFG or SSA graphs at this point as
6445 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
6446 handle ASSERT_EXPRs gracefully. */
6449 delete avail_exprs_stack;
6452 /* Traverse all the blocks folding conditionals with known ranges. */
6455 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
6459 /* We have completed propagating through the lattice. */
6460 vr_values.set_lattice_propagation_complete ();
6464 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
6465 vr_values.dump_all_value_ranges (dump_file);
6466 fprintf (dump_file, "\n");
6469 /* Set range info on non-pointer SSA_NAMEs and nonnull on pointers. */
6470 for (i = 0; i < num_ssa_names; i++)
6472 tree name = ssa_name (i);
6476 const value_range *vr = get_value_range (name);
6478 || (vr->type == VR_VARYING)
6479 || (vr->type == VR_UNDEFINED)
6480 || (TREE_CODE (vr->min) != INTEGER_CST)
6481 || (TREE_CODE (vr->max) != INTEGER_CST))
6484 if (POINTER_TYPE_P (TREE_TYPE (name))
6485 && range_includes_zero_p (vr) == 0)
6486 set_ptr_nonnull (name);
6487 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
6488 set_range_info (name, vr->type,
6489 wi::to_wide (vr->min),
6490 wi::to_wide (vr->max));
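  /* As an illustrative example (a sketch, not code from this pass):
     given

	 c_3 = b_2 & 255;

     VRP computes the range [0, 255] for c_3; the set_range_info call
     above is what records that range on c_3, so later passes can
     still use it once VRP's own lattice has been released.  */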
  /* If we're checking array refs, we want to merge information on
     the executability of each edge between vrp_folder and the
     check_array_bounds_dom_walker: each can clear the
     EDGE_EXECUTABLE flag on edges, in different ways.

     Hence, if we're going to call check_all_array_refs, set
     the flag on every edge now, rather than in
     check_array_bounds_dom_walker's ctor; vrp_folder may clear
     it from some edges.  */
  if (warn_array_bounds && warn_array_bounds_p)
    set_all_edges_as_executable (cfun);
  class vrp_folder vrp_folder;
  vrp_folder.vr_values = &vr_values;
  vrp_folder.substitute_and_fold ();

  if (warn_array_bounds && warn_array_bounds_p)
    check_all_array_refs ();
}
/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA names
   have equivalent, though distinct ranges.  For instance,

     1	x_9 = p_3->n;
     2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
     3	if (p_4 == q_2)
     4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
     5	endif
     6	if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO: the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */
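/* As a small illustration (a sketch, not code from this pass) of
   ranges rather than constants flowing through the lattice:

     if (x_2 > 10)
       y_3 = x_2 + 1;

   on the true edge x_2 is known to lie in [11, +INF], so y_3 derives
   the range [12, +INF] (assuming the addition does not overflow),
   whereas constant propagation could only record "not a constant".  */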
static unsigned int
execute_vrp (bool warn_array_bounds_p)
{
  int i;
  edge e;
  switch_update *su;

  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();
  to_remove_edges.create (10);
  to_update_switch_stmts.create (5);
  threadedge_initialize_values ();
  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  class vrp_prop vrp_prop;
  vrp_prop.vrp_initialize ();
  vrp_prop.ssa_propagate ();
  vrp_prop.vrp_finalize (warn_array_bounds_p);
  /* We must identify jump threading opportunities before we release
     the data structures built by VRP.  */
  identify_jump_threads (&vrp_prop.vr_values);
  /* A comparison of an SSA_NAME against a constant where the SSA_NAME
     was set by a type conversion can often be rewritten to use the
     RHS of the type conversion.

     However, doing so inhibits jump threading through the comparison.
     So that transformation is not performed until after jump threading
     is complete.  */
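  /* As an illustrative sketch (not code from this pass), a test such as

	 _1 = (int) c_2;
	 if (_1 == 5)

     where c_2 has a narrower type can usually be rewritten to test
     c_2 against 5 directly, once the recorded ranges prove the
     conversion cannot alter the outcome of the comparison.  */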
  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple *last = last_stmt (bb);
      if (last && gimple_code (last) == GIMPLE_COND)
	vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
    }
  free_numbers_of_iterations_estimates (cfun);
  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();
  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);
  /* We identified all the jump threading opportunities earlier, but could
     not transform the CFG at that time.  This routine transforms the
     CFG and arranges for the dominator tree to be rebuilt if necessary.

     Note the SSA graph update will occur during the normal TODO
     processing by the pass manager.  */
  thread_through_all_blocks (false);
  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    remove_edge (e);

  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_label (su->stmt, 0);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }
  if (to_remove_edges.length () > 0)
    {
      free_dominance_info (CDI_DOMINATORS);
      loops_state_set (LOOPS_NEED_FIXUP);
    }
  to_remove_edges.release ();
  to_update_switch_stmts.release ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}
const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};
class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      warn_array_bounds_p = param;
    }
  virtual bool gate (function *) { return flag_tree_vrp != 0; }
  virtual unsigned int execute (function *)
    { return execute_vrp (warn_array_bounds_p); }

 private:
  bool warn_array_bounds_p;
}; // class pass_vrp
gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}
/* Worker for determine_value_range.  */

static void
determine_value_range_1 (value_range *vr, tree expr)
{
  if (BINARY_CLASS_P (expr))
    {
      value_range vr0 = VR_INITIALIZER, vr1 = VR_INITIALIZER;
      determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
      determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
      extract_range_from_binary_expr_1 (vr, TREE_CODE (expr), TREE_TYPE (expr),
					&vr0, &vr1);
    }
  else if (UNARY_CLASS_P (expr))
    {
      value_range vr0 = VR_INITIALIZER;
      determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
      extract_range_from_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
				     &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
    }
  else if (TREE_CODE (expr) == INTEGER_CST)
    set_value_range_to_value (vr, expr, NULL);
  else
    {
      value_range_type kind;
      wide_int min, max;

      /* For SSA names try to extract range info computed by VRP.  Otherwise
	 fall back to varying.  */
      if (TREE_CODE (expr) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (expr))
	  && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
	set_value_range (vr, kind, wide_int_to_tree (TREE_TYPE (expr), min),
			 wide_int_to_tree (TREE_TYPE (expr), max), NULL);
      else
	set_value_range_to_varying (vr);
    }
}
/* Compute a value-range for EXPR and set it in *MIN and *MAX.  Return
   the determined range type.  */

value_range_type
determine_value_range (tree expr, wide_int *min, wide_int *max)
{
  value_range vr = VR_INITIALIZER;
  determine_value_range_1 (&vr, expr);
  if ((vr.type == VR_RANGE
       || vr.type == VR_ANTI_RANGE)
      && !symbolic_range_p (&vr))
    {
      *min = wi::to_wide (vr.min);
      *max = wi::to_wide (vr.max);
      return vr.type;
    }

  return VR_VARYING;
}
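/* Example use of determine_value_range (an illustrative sketch; the
   surrounding code and variable names are hypothetical):

     wide_int min, max;
     if (determine_value_range (expr, &min, &max) == VR_RANGE)
       ...  the value of EXPR is known to lie in [min, max]  ...

   Any other result should be treated as an unknown range.  */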