1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2022 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
24 #include "basic-block.h"
28 #include "dominance.h"
33 #include "tree-pass.h"
35 #include "gimple-pretty-print.h"
36 #include "fold-const.h"
38 #include "gimple-iterator.h"
40 #include "tree-ssa-loop-manip.h"
41 #include "tree-ssa-loop-niter.h"
42 #include "tree-into-ssa.h"
44 #include "tree-scalar-evolution.h"
45 #include "tree-ssa-propagate.h"
47 #include "vr-values.h"
48 #include "gimple-array-bounds.h"
49 #include "gimple-range.h"
50 #include "gimple-range-path.h"
51 #include "value-pointer-equiv.h"
52 #include "gimple-fold.h"
54 #include "tree-ssa-dce.h"
56 // This class is utilized by VRP and ranger to remove __builtin_unreachable
57 // calls, and reflect any resulting global ranges.
59 // maybe_register_block () is called on each basic block, and if the block
60 // matches the pattern of one branch being a builtin_unreachable, the
61 // resulting executable edge is registered in a list.
63 // After all blocks have been processed, remove_and_update_globals() will
64 // - check all exports from registered blocks
65 // - ensure the cache entry of each export is set with the appropriate range
66 // - rewrite the conditions to take the executable edge
67 // - perform DCE on any feeding instructions to those rewritten conditions
69 // Then the immediate use chain of each export is walked, and a new
70 // global range is created by unioning the ranges at all remaining use locations.
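//
// As a purely illustrative sketch (hypothetical GIMPLE, not taken from any
// testcase), a block ending in
//   if (i_2 > 99) goto <bb 3>; else goto <bb 4>;
//   <bb 3>: __builtin_unreachable ();
// registers the executable edge to <bb 4>.  The condition is later rewritten
// to take that edge unconditionally, and if the guard dominates the exit,
// the global range of i_2 can be narrowed to reflect i_2 <= 99.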
72 class remove_unreachable {
74 remove_unreachable (gimple_ranger &r) : m_ranger (r) { m_list.create (30); }
75 ~remove_unreachable () { m_list.release (); }
76 void maybe_register_block (basic_block bb);
77 bool remove_and_update_globals (bool final_p);
79 gimple_ranger &m_ranger;
82 // Check if block BB has a __builtin_unreachable () call on one arm, and
83 // register the executable edge if so.
86 remove_unreachable::maybe_register_block (basic_block bb)
88 gimple *s = gimple_outgoing_range_stmt_p (bb);
89 if (!s || gimple_code (s) != GIMPLE_COND)
92 edge e0 = EDGE_SUCC (bb, 0);
93 basic_block bb0 = e0->dest;
94 bool un0 = EDGE_COUNT (bb0->succs) == 0
95 && gimple_seq_unreachable_p (bb_seq (bb0));
96 edge e1 = EDGE_SUCC (bb, 1);
97 basic_block bb1 = e1->dest;
98 bool un1 = EDGE_COUNT (bb1->succs) == 0
99 && gimple_seq_unreachable_p (bb_seq (bb1));
101 // If neither arm is unreachable, or both are, there is nothing to register.
106 m_list.safe_push (e1);
108 m_list.safe_push (e0);
111 // Process the edges in the list, changing the conditions and removing any
112 // dead code feeding those conditions. Calculate the range of any
113 // names that may have been exported from those blocks, and determine if
114 // there are any updates to their global ranges.
115 // FINAL_P indicates all builtin_unreachable calls should be removed.
116 // Return true if any builtin_unreachables/globals were eliminated/updated.
119 remove_unreachable::remove_and_update_globals (bool final_p)
121 if (m_list.length () == 0)
128 auto_bitmap all_exports;
129 for (i = 0; i < m_list.length (); i++)
132 gimple *s = gimple_outgoing_range_stmt_p (e->src);
133 gcc_checking_assert (gimple_code (s) == GIMPLE_COND);
134 bool lhs_p = TREE_CODE (gimple_cond_lhs (s)) == SSA_NAME;
135 bool rhs_p = TREE_CODE (gimple_cond_rhs (s)) == SSA_NAME;
136 // Do not remove __builtin_unreachable if it confers a relation, or else
137 // that relation will be lost in subsequent passes. Unless it's the
139 if (!final_p && lhs_p && rhs_p)
141 // If this is already a constant condition, there is nothing to do either.
142 if (!lhs_p && !rhs_p)
145 bool dominate_exit_p = true;
146 FOR_EACH_GORI_EXPORT_NAME (m_ranger.gori (), e->src, name)
148 // Ensure the cache is set for NAME in the succ block.
149 Value_Range r(TREE_TYPE (name));
150 Value_Range ex(TREE_TYPE (name));
151 m_ranger.range_on_entry (r, e->dest, name);
152 m_ranger.range_on_entry (ex, EXIT_BLOCK_PTR_FOR_FN (cfun), name);
153 // If the range produced by this __builtin_unreachable expression
154 // is not fully reflected in the range at exit, then it does not
155 // dominate the exit of the function.
156 if (ex.intersect (r))
157 dominate_exit_p = false;
160 // If the exit is dominated, add to the export list. Otherwise if this
161 // isn't the final VRP pass, leave the call in the IL.
163 bitmap_ior_into (all_exports, m_ranger.gori ().exports (e->src));
168 // Rewrite the condition.
169 if (e->flags & EDGE_TRUE_VALUE)
170 gimple_cond_make_true (as_a<gcond *> (s));
172 gimple_cond_make_false (as_a<gcond *> (s));
176 if (bitmap_empty_p (all_exports))
178 // Invoke DCE on all exported names to eliminate dead feeding defs
180 bitmap_copy (dce, all_exports);
181 // Don't attempt to DCE parameters.
182 EXECUTE_IF_SET_IN_BITMAP (all_exports, 0, i, bi)
183 if (!ssa_name (i) || SSA_NAME_IS_DEFAULT_DEF (ssa_name (i)))
184 bitmap_clear_bit (dce, i);
185 simple_dce_from_worklist (dce);
187 // Loop over all uses of each name and find maximal range. This is the
190 imm_use_iterator iter;
191 EXECUTE_IF_SET_IN_BITMAP (all_exports, 0, i, bi)
194 if (!name || SSA_NAME_IN_FREE_LIST (name))
196 Value_Range r (TREE_TYPE (name));
197 Value_Range exp_range (TREE_TYPE (name));
199 FOR_EACH_IMM_USE_FAST (use_p, iter, name)
201 gimple *use_stmt = USE_STMT (use_p);
202 if (is_gimple_debug (use_stmt))
204 if (!m_ranger.range_of_expr (exp_range, name, use_stmt))
205 exp_range.set_varying (TREE_TYPE (name));
206 r.union_ (exp_range);
210 // Include the on-exit range to ensure non-dominated unreachables
211 // don't incorrectly impact the global range.
212 m_ranger.range_on_entry (exp_range, EXIT_BLOCK_PTR_FOR_FN (cfun), name);
213 r.union_ (exp_range);
214 if (r.varying_p () || r.undefined_p ())
216 if (!set_range_info (name, r))
221 fprintf (dump_file, "Global Exported (via unreachable): ");
222 print_generic_expr (dump_file, name, TDF_SLIM);
223 fprintf (dump_file, " = ");
224 gimple_range_global (r, name);
226 fputc ('\n', dump_file);
232 /* Set of SSA names found live during the RPO traversal of the function
233 for still active basic-blocks. */
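/* A minimal usage sketch (hypothetical names and blocks, for illustration
   only):

     live_names live;
     live.set (name_1, bb2);                     // NAME_1 is live in BB2.
     live.merge (bb1, bb2);                      // Propagate BB2's names into BB1.
     bool b = live.live_on_edge_p (name_1, e);   // True when E->DEST is BB2.

   The underlying sbitmaps are allocated lazily, one per basic block.  */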
239 void set (tree, basic_block);
240 void clear (tree, basic_block);
241 void merge (basic_block dest, basic_block src);
242 bool live_on_block_p (tree, basic_block);
243 bool live_on_edge_p (tree, edge);
244 bool block_has_live_names_p (basic_block);
245 void clear_block (basic_block);
250 void init_bitmap_if_needed (basic_block);
254 live_names::init_bitmap_if_needed (basic_block bb)
256 unsigned i = bb->index;
259 live[i] = sbitmap_alloc (num_ssa_names);
260 bitmap_clear (live[i]);
265 live_names::block_has_live_names_p (basic_block bb)
267 unsigned i = bb->index;
268 return live[i] && !bitmap_empty_p (live[i]);
272 live_names::clear_block (basic_block bb)
274 unsigned i = bb->index;
277 sbitmap_free (live[i]);
283 live_names::merge (basic_block dest, basic_block src)
285 init_bitmap_if_needed (dest);
286 init_bitmap_if_needed (src);
287 bitmap_ior (live[dest->index], live[dest->index], live[src->index]);
291 live_names::set (tree name, basic_block bb)
293 init_bitmap_if_needed (bb);
294 bitmap_set_bit (live[bb->index], SSA_NAME_VERSION (name));
298 live_names::clear (tree name, basic_block bb)
300 unsigned i = bb->index;
302 bitmap_clear_bit (live[i], SSA_NAME_VERSION (name));
305 live_names::live_names ()
307 num_blocks = last_basic_block_for_fn (cfun);
308 live = XCNEWVEC (sbitmap, num_blocks);
311 live_names::~live_names ()
313 for (unsigned i = 0; i < num_blocks; ++i)
315 sbitmap_free (live[i]);
320 live_names::live_on_block_p (tree name, basic_block bb)
322 return (live[bb->index]
323 && bitmap_bit_p (live[bb->index], SSA_NAME_VERSION (name)));
326 /* Return true if the SSA name NAME is live on the edge E. */
329 live_names::live_on_edge_p (tree name, edge e)
331 return live_on_block_p (name, e->dest);
335 /* VR_TYPE describes a range with minimum value *MIN and maximum
336 value *MAX. Restrict the range to the set of values that have
337 no bits set outside NONZERO_BITS. Update *MIN and *MAX and
338 return the new range type.
340 SGN gives the sign of the values described by the range. */
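/* A worked example with illustrative values: for a VR_RANGE [1, 14] and
   NONZERO_BITS == 0b1100, the only values with no bits outside the mask
   are 0, 4, 8 and 12, so *MIN is rounded up to 4, *MAX is rounded down to
   12, and the result remains a VR_RANGE [4, 12].  */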
342 enum value_range_kind
343 intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
344 wide_int *min, wide_int *max,
345 const wide_int &nonzero_bits,
348 if (vr_type == VR_ANTI_RANGE)
350 /* The VR_ANTI_RANGE is equivalent to the union of the ranges
351 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS
352 to create an inclusive upper bound for A and an inclusive lower
354 wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
355 wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
357 /* If the calculation of A_MAX wrapped, A is effectively empty
358 and A_MAX is the highest value that satisfies NONZERO_BITS.
359 Likewise if the calculation of B_MIN wrapped, B is effectively
360 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */
361 bool a_empty = wi::ge_p (a_max, *min, sgn);
362 bool b_empty = wi::le_p (b_min, *max, sgn);
364 /* If both A and B are empty, there are no valid values. */
365 if (a_empty && b_empty)
368 /* If exactly one of A or B is empty, return a VR_RANGE for the
370 if (a_empty || b_empty)
374 gcc_checking_assert (wi::le_p (*min, *max, sgn));
378 /* Update the VR_ANTI_RANGE bounds. */
381 gcc_checking_assert (wi::le_p (*min, *max, sgn));
383 /* Now check whether the excluded range includes any values that
384 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */
385 if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
387 unsigned int precision = min->get_precision ();
388 *min = wi::min_value (precision, sgn);
389 *max = wi::max_value (precision, sgn);
393 if (vr_type == VR_RANGE || vr_type == VR_VARYING)
395 *max = wi::round_down_for_mask (*max, nonzero_bits);
397 /* Check that the range contains at least one valid value. */
398 if (wi::gt_p (*min, *max, sgn))
401 *min = wi::round_up_for_mask (*min, nonzero_bits);
402 gcc_checking_assert (wi::le_p (*min, *max, sgn));
407 /* Return true if max and min of VR are INTEGER_CST. It's not necessary
411 range_int_cst_p (const value_range *vr)
413 return (vr->kind () == VR_RANGE && range_has_numeric_bounds_p (vr));
416 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
417 otherwise. We only handle additive operations and set NEG to true if the
418 symbol is negated and INV to the invariant part, if any. */
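/* For instance (hypothetical SSA names): for T == x_1 + 16 this returns
   x_1 with *NEG == false and *INV == 16; for T == 3 - x_1 it returns x_1
   with *NEG == true and *INV == 3; for anything not of the form
   [-]NAME [+ CST] it returns NULL_TREE.  */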
421 get_single_symbol (tree t, bool *neg, tree *inv)
429 if (TREE_CODE (t) == PLUS_EXPR
430 || TREE_CODE (t) == POINTER_PLUS_EXPR
431 || TREE_CODE (t) == MINUS_EXPR)
433 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
435 neg_ = (TREE_CODE (t) == MINUS_EXPR);
436 inv_ = TREE_OPERAND (t, 0);
437 t = TREE_OPERAND (t, 1);
439 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
442 inv_ = TREE_OPERAND (t, 1);
443 t = TREE_OPERAND (t, 0);
454 if (TREE_CODE (t) == NEGATE_EXPR)
456 t = TREE_OPERAND (t, 0);
460 if (TREE_CODE (t) != SSA_NAME)
463 if (inv_ && TREE_OVERFLOW_P (inv_))
464 inv_ = drop_tree_overflow (inv_);
471 /* The reverse operation: build a symbolic expression with TYPE
472 from symbol SYM, negated according to NEG, and invariant INV. */
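/* E.g. (purely illustrative): SYM == x_1, NEG == true and INV == 16 with
   an integral TYPE produce the expression -x_1 + 16; with INV == 0 the
   result is just -x_1.  */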
475 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
477 const bool pointer_p = POINTER_TYPE_P (type);
481 t = build1 (NEGATE_EXPR, type, t);
483 if (integer_zerop (inv))
486 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
492 -2 if those are incomparable. */
494 operand_less_p (tree val, tree val2)
496 /* LT is folded faster than GE and others. Inline the common case. */
497 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
498 return tree_int_cst_lt (val, val2);
499 else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
500 return val == val2 ? 0 : -2;
503 int cmp = compare_values (val, val2);
506 else if (cmp == 0 || cmp == 1)
513 /* Compare two values VAL1 and VAL2. Return
515 -2 if VAL1 and VAL2 cannot be compared at compile-time,
518 +1 if VAL1 > VAL2, and
521 This is similar to tree_int_cst_compare but supports pointer values
522 and values that cannot be compared at compile time.
524 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
525 true if the return value is only valid if we assume that signed
526 overflow is undefined. */
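/* Illustrative examples (hypothetical names): comparing n_1 + 8 with
   n_1 + 12 returns -1 when signed overflow is undefined (in which case
   *STRICT_OVERFLOW_P may be set), while comparing expressions based on
   two unrelated names returns -2.  */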
529 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
534 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
536 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
537 == POINTER_TYPE_P (TREE_TYPE (val2)));
539 /* Convert the two values into the same type. This is needed because
540 sizetype causes sign extension even for unsigned types. */
541 if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
542 val2 = fold_convert (TREE_TYPE (val1), val2);
544 const bool overflow_undefined
545 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
546 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
549 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
550 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
552 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
553 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
556 /* Both values must use the same name with the same sign. */
557 if (sym1 != sym2 || neg1 != neg2)
560 /* [-]NAME + CST == [-]NAME + CST. */
564 /* If overflow is defined we cannot simplify more. */
565 if (!overflow_undefined)
568 if (strict_overflow_p != NULL
569 /* Symbolic range building sets the no-warning bit to declare
570 that overflow doesn't happen. */
571 && (!inv1 || !warning_suppressed_p (val1, OPT_Woverflow))
572 && (!inv2 || !warning_suppressed_p (val2, OPT_Woverflow)))
573 *strict_overflow_p = true;
576 inv1 = build_int_cst (TREE_TYPE (val1), 0);
578 inv2 = build_int_cst (TREE_TYPE (val2), 0);
580 return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
581 TYPE_SIGN (TREE_TYPE (val1)));
584 const bool cst1 = is_gimple_min_invariant (val1);
585 const bool cst2 = is_gimple_min_invariant (val2);
587 /* If one is of the form '[-]NAME + CST' and the other is constant, then
588 it might be possible to say something depending on the constants. */
589 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
591 if (!overflow_undefined)
594 if (strict_overflow_p != NULL
595 /* Symbolic range building sets the no-warning bit to declare
596 that overflow doesn't happen. */
597 && (!sym1 || !warning_suppressed_p (val1, OPT_Woverflow))
598 && (!sym2 || !warning_suppressed_p (val2, OPT_Woverflow)))
599 *strict_overflow_p = true;
601 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
602 tree cst = cst1 ? val1 : val2;
603 tree inv = cst1 ? inv2 : inv1;
605 /* Compute the difference between the constants. If it overflows or
606 underflows, this means that we can trivially compare the NAME with
607 it and, consequently, the two values with each other. */
608 wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
609 if (wi::cmp (0, wi::to_wide (inv), sgn)
610 != wi::cmp (diff, wi::to_wide (cst), sgn))
612 const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
613 return cst1 ? res : -res;
619 /* We cannot say anything more for non-constants. */
623 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
625 /* We cannot compare overflowed values. */
626 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
629 if (TREE_CODE (val1) == INTEGER_CST
630 && TREE_CODE (val2) == INTEGER_CST)
631 return tree_int_cst_compare (val1, val2);
633 if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
635 if (known_eq (wi::to_poly_widest (val1),
636 wi::to_poly_widest (val2)))
638 if (known_lt (wi::to_poly_widest (val1),
639 wi::to_poly_widest (val2)))
641 if (known_gt (wi::to_poly_widest (val1),
642 wi::to_poly_widest (val2)))
650 if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
652 /* We cannot compare overflowed values. */
653 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
656 return tree_int_cst_compare (val1, val2);
659 /* First see if VAL1 and VAL2 are not the same. */
660 if (operand_equal_p (val1, val2, 0))
663 fold_defer_overflow_warnings ();
665 /* If VAL1 is a lower address than VAL2, return -1. */
666 tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
667 if (t && integer_onep (t))
669 fold_undefer_and_ignore_overflow_warnings ();
673 /* If VAL1 is a higher address than VAL2, return +1. */
674 t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
675 if (t && integer_onep (t))
677 fold_undefer_and_ignore_overflow_warnings ();
681 /* If VAL1 is different than VAL2, return +2. */
682 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
683 fold_undefer_and_ignore_overflow_warnings ();
684 if (t && integer_onep (t))
691 /* Compare values like compare_values_warnv. */
694 compare_values (tree val1, tree val2)
697 return compare_values_warnv (val1, val2, &sop);
700 /* If BOUND will include a symbolic bound, adjust it accordingly,
701 otherwise leave it as is.
703 CODE is the original operation that combined the bounds (PLUS_EXPR
706 TYPE is the type of the original operation.
708 SYM_OPn is the symbolic for OPn if it has a symbolic.
710 NEG_OPn is TRUE if the OPn was negated. */
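/* As an illustration (hypothetical symbol x_1): when
   extract_range_from_plus_minus_expr computes [x_1 + 2, x_1 + 5] + [3, 3],
   the numeric bounds 5 and 8 are computed first and this routine then
   re-attaches the symbol, yielding the symbolic bounds x_1 + 5 and
   x_1 + 8.  */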
713 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
714 tree sym_op0, tree sym_op1,
715 bool neg_op0, bool neg_op1)
717 bool minus_p = (code == MINUS_EXPR);
718 /* If the result bound is constant, we're done; otherwise, build the
719 symbolic bound. */
720 if (sym_op0 == sym_op1)
723 bound = build_symbolic_expr (type, sym_op0,
727 /* We may not negate if that might introduce
728 undefined overflow. */
731 || TYPE_OVERFLOW_WRAPS (type))
732 bound = build_symbolic_expr (type, sym_op1,
733 neg_op1 ^ minus_p, bound);
739 /* Combine OP0 and OP1, which are two parts of a bound, into one wide
740 int bound according to CODE. CODE is the operation combining the
741 bound (either a PLUS_EXPR or a MINUS_EXPR).
743 TYPE is the type of the combine operation.
745 WI is the wide int to store the result.
747 OVF is -1 if an underflow occurred, +1 if an overflow occurred or 0
748 if no over/underflow occurred. */
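/* E.g. (illustrative): for CODE == MINUS_EXPR, OP0 == 2 and OP1 == 7 in an
   unsigned 32-bit TYPE, WI wraps around to 0xfffffffb and OVF records the
   underflow.  */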
751 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
752 tree type, tree op0, tree op1)
754 bool minus_p = (code == MINUS_EXPR);
755 const signop sgn = TYPE_SIGN (type);
756 const unsigned int prec = TYPE_PRECISION (type);
758 /* Combine the bounds, if any. */
762 wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
764 wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
767 wi = wi::to_wide (op0);
771 wi = wi::neg (wi::to_wide (op1), &ovf);
773 wi = wi::to_wide (op1);
776 wi = wi::shwi (0, prec);
779 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and
780 put the result in VR.
782 TYPE is the type of the range.
784 MIN_OVF and MAX_OVF indicate what type of overflow, if any,
785 occurred while originally calculating WMIN or WMAX. -1 indicates
786 underflow. +1 indicates overflow. 0 indicates neither. */
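/* A worked example (illustrative): in a wrapping unsigned 8-bit type,
   WMIN == 250 and WMAX == 260 with MAX_OVF == +1 truncate to 250 and 4;
   the result becomes the anti-range ~[5, 249], i.e. the wrapped set
   {250..255} U {0..4}.  */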
789 set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
791 const wide_int &wmin, const wide_int &wmax,
792 wi::overflow_type min_ovf,
793 wi::overflow_type max_ovf)
795 const signop sgn = TYPE_SIGN (type);
796 const unsigned int prec = TYPE_PRECISION (type);
798 /* For one bit precision if max < min, then the swapped
799 range covers all values. */
800 if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
806 if (TYPE_OVERFLOW_WRAPS (type))
808 /* If overflow wraps, truncate the values and adjust the
809 range kind and bounds appropriately. */
810 wide_int tmin = wide_int::from (wmin, prec, sgn);
811 wide_int tmax = wide_int::from (wmax, prec, sgn);
812 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
814 /* If the limits are swapped, we wrapped around and cover
816 if (wi::gt_p (tmin, tmax, sgn))
821 /* No overflow or both overflow or underflow. The
822 range kind stays VR_RANGE. */
823 min = wide_int_to_tree (type, tmin);
824 max = wide_int_to_tree (type, tmax);
828 else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
829 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
831 /* Min underflow or max overflow. The range kind
832 changes to VR_ANTI_RANGE. */
836 if (wi::cmp (tmin, tmax, sgn) < 0)
839 if (wi::cmp (tmax, tem, sgn) > 0)
841 /* If the anti-range would cover nothing, drop to varying.
842 Likewise if the anti-range bounds are outside of the
844 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
849 kind = VR_ANTI_RANGE;
850 min = wide_int_to_tree (type, tmin);
851 max = wide_int_to_tree (type, tmax);
856 /* Other underflow and/or overflow, drop to VR_VARYING. */
863 /* If overflow does not wrap, saturate to the type's min/max
865 wide_int type_min = wi::min_value (prec, sgn);
866 wide_int type_max = wi::max_value (prec, sgn);
868 if (min_ovf == wi::OVF_UNDERFLOW)
869 min = wide_int_to_tree (type, type_min);
870 else if (min_ovf == wi::OVF_OVERFLOW)
871 min = wide_int_to_tree (type, type_max);
873 min = wide_int_to_tree (type, wmin);
875 if (max_ovf == wi::OVF_UNDERFLOW)
876 max = wide_int_to_tree (type, type_min);
877 else if (max_ovf == wi::OVF_OVERFLOW)
878 max = wide_int_to_tree (type, type_max);
880 max = wide_int_to_tree (type, wmax);
884 /* Fold two value ranges of a POINTER_PLUS_EXPR into VR. */
887 extract_range_from_pointer_plus_expr (value_range *vr,
890 const value_range *vr0,
891 const value_range *vr1)
893 gcc_checking_assert (POINTER_TYPE_P (expr_type)
894 && code == POINTER_PLUS_EXPR);
895 /* For pointer types, we are really only interested in asserting
896 whether the expression evaluates to non-NULL.
897 With -fno-delete-null-pointer-checks we need to be more
898 conservative. As some object might reside at address 0,
899 then some offset could be added to it and the same offset
900 subtracted again and the result would be NULL.
902 static int a[12]; where &a[0] is NULL and
905 ptr will be NULL here, even when there is POINTER_PLUS_EXPR
906 where the first range doesn't include zero and the second one
907 doesn't either. As the second operand is sizetype (unsigned),
908 consider all ranges where the MSB could be set as possible
909 subtractions where the result might be NULL. */
910 if ((!range_includes_zero_p (vr0)
911 || !range_includes_zero_p (vr1))
912 && !TYPE_OVERFLOW_WRAPS (expr_type)
913 && (flag_delete_null_pointer_checks
914 || (range_int_cst_p (vr1)
915 && !tree_int_cst_sign_bit (vr1->max ()))))
916 vr->set_nonzero (expr_type);
917 else if (vr0->zero_p () && vr1->zero_p ())
918 vr->set_zero (expr_type);
920 vr->set_varying (expr_type);
923 /* Extract range information from a PLUS/MINUS_EXPR and store the
927 extract_range_from_plus_minus_expr (value_range *vr,
930 const value_range *vr0_,
931 const value_range *vr1_)
933 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
935 value_range vr0 = *vr0_, vr1 = *vr1_;
936 value_range vrtem0, vrtem1;
938 /* Now canonicalize anti-ranges to ranges when they are not symbolic
939 and express ~[] op X as ([]' op X) U ([]'' op X). */
940 if (vr0.kind () == VR_ANTI_RANGE
941 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
943 extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
944 if (!vrtem1.undefined_p ())
947 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
953 /* Likewise for X op ~[]. */
954 if (vr1.kind () == VR_ANTI_RANGE
955 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
957 extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
958 if (!vrtem1.undefined_p ())
961 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
968 value_range_kind kind;
969 value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
970 tree vr0_min = vr0.min (), vr0_max = vr0.max ();
971 tree vr1_min = vr1.min (), vr1_max = vr1.max ();
972 tree min = NULL_TREE, max = NULL_TREE;
974 /* This will normalize things such that calculating
975 [0,0] - VR_VARYING is not dropped to varying, but is
976 calculated as [MIN+1, MAX]. */
977 if (vr0.varying_p ())
980 vr0_min = vrp_val_min (expr_type);
981 vr0_max = vrp_val_max (expr_type);
983 if (vr1.varying_p ())
986 vr1_min = vrp_val_min (expr_type);
987 vr1_max = vrp_val_max (expr_type);
990 const bool minus_p = (code == MINUS_EXPR);
991 tree min_op0 = vr0_min;
992 tree min_op1 = minus_p ? vr1_max : vr1_min;
993 tree max_op0 = vr0_max;
994 tree max_op1 = minus_p ? vr1_min : vr1_max;
995 tree sym_min_op0 = NULL_TREE;
996 tree sym_min_op1 = NULL_TREE;
997 tree sym_max_op0 = NULL_TREE;
998 tree sym_max_op1 = NULL_TREE;
999 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
1001 neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
1003 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
1004 single-symbolic ranges, try to compute the precise resulting range,
1005 but only if we know that this resulting range will also be constant
1006 or single-symbolic. */
1007 if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
1008 && (TREE_CODE (min_op0) == INTEGER_CST
1010 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
1011 && (TREE_CODE (min_op1) == INTEGER_CST
1013 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
1014 && (!(sym_min_op0 && sym_min_op1)
1015 || (sym_min_op0 == sym_min_op1
1016 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
1017 && (TREE_CODE (max_op0) == INTEGER_CST
1019 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
1020 && (TREE_CODE (max_op1) == INTEGER_CST
1022 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
1023 && (!(sym_max_op0 && sym_max_op1)
1024 || (sym_max_op0 == sym_max_op1
1025 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
1027 wide_int wmin, wmax;
1028 wi::overflow_type min_ovf = wi::OVF_NONE;
1029 wi::overflow_type max_ovf = wi::OVF_NONE;
1031 /* Build the bounds. */
1032 combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
1033 combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
1035 /* If the resulting range will be symbolic, we need to eliminate any
1036 explicit or implicit overflow introduced in the above computation
1037 because compare_values could make an incorrect use of it. That's
1038 why we require one of the ranges to be a singleton. */
1039 if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
1040 && ((bool)min_ovf || (bool)max_ovf
1041 || (min_op0 != max_op0 && min_op1 != max_op1)))
1043 vr->set_varying (expr_type);
1047 /* Adjust the range for possible overflow. */
1048 set_value_range_with_overflow (kind, min, max, expr_type,
1049 wmin, wmax, min_ovf, max_ovf);
1050 if (kind == VR_VARYING)
1052 vr->set_varying (expr_type);
1056 /* Build the symbolic bounds if needed. */
1057 adjust_symbolic_bound (min, code, expr_type,
1058 sym_min_op0, sym_min_op1,
1059 neg_min_op0, neg_min_op1);
1060 adjust_symbolic_bound (max, code, expr_type,
1061 sym_max_op0, sym_max_op1,
1062 neg_max_op0, neg_max_op1);
1066 /* For other cases, for example if we have a PLUS_EXPR with two
1067 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
1068 to compute a precise range for such a case.
1069 ??? General even mixed range kind operations can be expressed
1070 by for example transforming ~[3, 5] + [1, 2] to range-only
1071 operations and a union primitive:
1072 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
1073 [-INF+1, 4] U [6, +INF(OVF)]
1074 though usually the union is not exactly representable with
1075 a single range or anti-range as the above is
1076 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
1077 but one could use a scheme similar to equivalences for this. */
1078 vr->set_varying (expr_type);
1082 /* If either MIN or MAX overflowed, then set the resulting range to
1084 if (min == NULL_TREE
1085 || TREE_OVERFLOW_P (min)
1087 || TREE_OVERFLOW_P (max))
1089 vr->set_varying (expr_type);
1093 int cmp = compare_values (min, max);
1094 if (cmp == -2 || cmp == 1)
1096 /* If the new range has its limits swapped around (MIN > MAX),
1097 then the operation caused one of them to wrap around, mark
1098 the new range VARYING. */
1099 vr->set_varying (expr_type);
1102 vr->set (min, max, kind);
1105 /* If the types passed are supported, return TRUE, otherwise set VR to
1106 VARYING and return FALSE. */
1109 supported_types_p (value_range *vr,
1113 if (!value_range_equiv::supports_p (type0)
1114 || (type1 && !value_range_equiv::supports_p (type1)))
1116 vr->set_varying (type0);
1122 /* If any of the ranges passed are defined, return TRUE, otherwise set
1123 VR to UNDEFINED and return FALSE. */
1126 defined_ranges_p (value_range *vr,
1127 const value_range *vr0, const value_range *vr1 = NULL)
1129 if (vr0->undefined_p () && (!vr1 || vr1->undefined_p ()))
1131 vr->set_undefined ();
1138 drop_undefines_to_varying (const value_range *vr, tree expr_type)
1140 if (vr->undefined_p ())
1141 return value_range (expr_type);
1146 /* If any operand is symbolic, perform a binary operation on them and
1147 return TRUE, otherwise return FALSE. */
1150 range_fold_binary_symbolics_p (value_range *vr,
1153 const value_range *vr0_,
1154 const value_range *vr1_)
1156 if (vr0_->symbolic_p () || vr1_->symbolic_p ())
1158 value_range vr0 = drop_undefines_to_varying (vr0_, expr_type);
1159 value_range vr1 = drop_undefines_to_varying (vr1_, expr_type);
1160 if ((code == PLUS_EXPR || code == MINUS_EXPR))
1162 extract_range_from_plus_minus_expr (vr, code, expr_type,
1166 if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
1168 extract_range_from_pointer_plus_expr (vr, code, expr_type,
1172 range_op_handler op (code, expr_type);
1174 vr->set_varying (expr_type);
1175 vr0.normalize_symbolics ();
1176 vr1.normalize_symbolics ();
1177 return op.fold_range (*vr, expr_type, vr0, vr1);
1182 /* If operand is symbolic, perform a unary operation on it and return
1183 TRUE, otherwise return FALSE. */
1186 range_fold_unary_symbolics_p (value_range *vr,
1189 const value_range *vr0)
1191 if (vr0->symbolic_p ())
1193 if (code == NEGATE_EXPR)
1195 /* -X is simply 0 - X. */
1197 zero.set_zero (vr0->type ());
1198 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
1201 if (code == BIT_NOT_EXPR)
1203 /* ~X is simply -1 - X. */
1204 value_range minusone;
1205 tree t = build_int_cst (vr0->type (), -1);
1206 minusone.set (t, t);
1207 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
1210 range_op_handler op (code, expr_type);
1212 vr->set_varying (expr_type);
1213 value_range vr0_cst (*vr0);
1214 vr0_cst.normalize_symbolics ();
1215 return op.fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
1220 /* Perform a binary operation on a pair of ranges. */
1223 range_fold_binary_expr (value_range *vr,
1224 enum tree_code code,
1226 const value_range *vr0_,
1227 const value_range *vr1_)
1229 if (!supported_types_p (vr, expr_type)
1230 || !defined_ranges_p (vr, vr0_, vr1_))
1232 range_op_handler op (code, expr_type);
1235 vr->set_varying (expr_type);
1239 if (range_fold_binary_symbolics_p (vr, code, expr_type, vr0_, vr1_))
1242 value_range vr0 (*vr0_);
1243 value_range vr1 (*vr1_);
1244 if (vr0.undefined_p ())
1245 vr0.set_varying (expr_type);
1246 if (vr1.undefined_p ())
1247 vr1.set_varying (expr_type);
1248 vr0.normalize_addresses ();
1249 vr1.normalize_addresses ();
1250 if (!op.fold_range (*vr, expr_type, vr0, vr1))
1251 vr->set_varying (expr_type);
1254 /* Perform a unary operation on a range. */
1257 range_fold_unary_expr (value_range *vr,
1258 enum tree_code code, tree expr_type,
1259 const value_range *vr0,
1262 if (!supported_types_p (vr, expr_type, vr0_type)
1263 || !defined_ranges_p (vr, vr0))
1265 range_op_handler op (code, expr_type);
1268 vr->set_varying (expr_type);
1272 if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
1275 value_range vr0_cst (*vr0);
1276 vr0_cst.normalize_addresses ();
1277 if (!op.fold_range (*vr, expr_type, vr0_cst, value_range (expr_type)))
1278 vr->set_varying (expr_type);
1281 /* If the range of values taken by OP can be inferred after STMT executes,
1282 return the comparison code (COMP_CODE_P) and value (VAL_P) that
1283 describe the inferred range. Return true if a range could be
1287 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
1290 *comp_code_p = ERROR_MARK;
1292 /* Do not attempt to infer anything in names that flow through
1294 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
1297 /* If STMT is the last statement of a basic block with no normal
1298 successors, there is no point inferring anything about any of its
1299 operands. We would not be able to find a proper insertion point
1300 for the assertion, anyway. */
1301 if (stmt_ends_bb_p (stmt))
1306 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1307 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
1313 if (infer_nonnull_range (stmt, op))
1315 *val_p = build_int_cst (TREE_TYPE (op), 0);
1316 *comp_code_p = NE_EXPR;
1323 /* Dump assert_info structure. */
1326 dump_assert_info (FILE *file, const assert_info &assert)
1328 fprintf (file, "Assert for: ");
1329 print_generic_expr (file, assert.name);
1330 fprintf (file, "\n\tPREDICATE: expr=[");
1331 print_generic_expr (file, assert.expr);
1332 fprintf (file, "] %s ", get_tree_code_name (assert.comp_code));
1333 fprintf (file, "val=[");
1334 print_generic_expr (file, assert.val);
1335 fprintf (file, "]\n\n");
1339 debug (const assert_info &assert)
1341 dump_assert_info (stderr, assert);
1344 /* Dump a vector of assert_info's. */
1347 dump_asserts_info (FILE *file, const vec<assert_info> &asserts)
1349 for (unsigned i = 0; i < asserts.length (); ++i)
1351 dump_assert_info (file, asserts[i]);
1352 fprintf (file, "\n");
1357 debug (const vec<assert_info> &asserts)
1359 dump_asserts_info (stderr, asserts);
1362 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
1365 add_assert_info (vec<assert_info> &asserts,
1366 tree name, tree expr, enum tree_code comp_code, tree val)
1369 info.comp_code = comp_code;
1371 if (TREE_OVERFLOW_P (val))
1372 val = drop_tree_overflow (val);
1375 asserts.safe_push (info);
1376 if (dump_enabled_p ())
1377 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
1378 "Adding assert for %T from %T %s %T\n",
1379 name, expr, op_symbol_code (comp_code), val);
1382 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
1383 Extract a suitable test code and value and store them into *CODE_P and
1384 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
1386 If no extraction was possible, return FALSE, otherwise return TRUE.
1388 If INVERT is true, then we invert the result stored into *CODE_P. */
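/* For example (hypothetical SSA name j_7): for the condition 10 < j_7 with
   INVERT == true (the false edge), the comparison is first swapped to
   j_7 > 10 and then inverted, so *CODE_P becomes LE_EXPR and *VAL_P becomes
   10, i.e. the normalized predicate is j_7 <= 10.  */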
1391 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
1392 tree cond_op0, tree cond_op1,
1393 bool invert, enum tree_code *code_p,
1396 enum tree_code comp_code;
1399 /* Otherwise, we have a comparison of the form NAME COMP VAL
1400 or VAL COMP NAME. */
1401 if (name == cond_op1)
1403 /* If the predicate is of the form VAL COMP NAME, flip
1404 COMP around because we need to register NAME as the
1405 first operand in the predicate. */
1406 comp_code = swap_tree_comparison (cond_code);
1409 else if (name == cond_op0)
1411 /* The comparison is of the form NAME COMP VAL, so the
1412 comparison code remains unchanged. */
1413 comp_code = cond_code;
1419 /* Invert the comparison code as necessary. */
1421 comp_code = invert_tree_comparison (comp_code, 0);
1423 /* VRP only handles integral and pointer types. */
1424 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
1425 && ! POINTER_TYPE_P (TREE_TYPE (val)))
1428 /* Do not register always-false predicates.
1429 FIXME: this works around a limitation in fold() when dealing with
1430 enumerations. Given 'enum { N1, N2 } x;', fold will not
1431 fold 'if (x > N2)' to 'if (0)'. */
1432 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
1433 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
1435 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
1436 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
1438 if (comp_code == GT_EXPR
1440 || compare_values (val, max) == 0))
1443 if (comp_code == LT_EXPR
1445 || compare_values (val, min) == 0))
1448 *code_p = comp_code;
1453 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
1454 (otherwise return VAL). VAL and MASK must be zero-extended for
1455 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
1456 (to transform signed values into unsigned) and at the end xor
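/* A hypothetical example: VAL == 5 (0b0101), MASK == 0b1010 and SGNBIT == 0
   yield RES == 8 (0b1000), the smallest value greater than VAL whose set
   bits all lie within MASK.  */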
1460 masked_increment (const wide_int &val_in, const wide_int &mask,
1461 const wide_int &sgnbit, unsigned int prec)
1463 wide_int bit = wi::one (prec), res;
1466 wide_int val = val_in ^ sgnbit;
1467 for (i = 0; i < prec; i++, bit += bit)
1470 if ((res & bit) == 0)
1473 res = wi::bit_and_not (val + bit, res);
1475 if (wi::gtu_p (res, val))
1476 return res ^ sgnbit;
1478 return val ^ sgnbit;
1481 /* Helper for overflow_comparison_p
1483 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
1484 OP1's defining statement to see if it ultimately has the form
1485 OP0 CODE (OP0 PLUS INTEGER_CST)
1487 If so, return TRUE indicating this is an overflow test and store into
1488 *NEW_CST an updated constant that can be used in a narrowed range test.
1490 REVERSED indicates if the comparison was originally:
1494 This affects how we build the updated constant. */
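/* A purely illustrative example (hypothetical unsigned char SSA names):
   for x_1 <= x_2 where x_2 = x_1 + 3, the comparison only holds when the
   addition does not wrap, so *NEW_CST becomes 252 and the test can be
   narrowed to x_1 <= 252.  */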
1497 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
1498 bool follow_assert_exprs, bool reversed, tree *new_cst)
1500 /* See if this is a relational operation between two SSA_NAMES with
1501 unsigned, overflow wrapping values. If so, check it more deeply. */
1502 if ((code == LT_EXPR || code == LE_EXPR
1503 || code == GE_EXPR || code == GT_EXPR)
1504 && TREE_CODE (op0) == SSA_NAME
1505 && TREE_CODE (op1) == SSA_NAME
1506 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
1507 && TYPE_UNSIGNED (TREE_TYPE (op0))
1508 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
1510 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
1512 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
1513 if (follow_assert_exprs)
1515 while (gimple_assign_single_p (op1_def)
1516 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
1518 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
1519 if (TREE_CODE (op1) != SSA_NAME)
1521 op1_def = SSA_NAME_DEF_STMT (op1);
1525 /* Now look at the defining statement of OP1 to see if it adds
1526 or subtracts a nonzero constant from another operand. */
1528 && is_gimple_assign (op1_def)
1529 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
1530 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
1531 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
1533 tree target = gimple_assign_rhs1 (op1_def);
1535 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
1536 for one where TARGET appears on the RHS. */
1537 if (follow_assert_exprs)
1539 /* Now see if that "other operand" is op0, following the chain
1540 of ASSERT_EXPRs if necessary. */
1541 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
1542 while (op0 != target
1543 && gimple_assign_single_p (op0_def)
1544 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
1546 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
1547 if (TREE_CODE (op0) != SSA_NAME)
1549 op0_def = SSA_NAME_DEF_STMT (op0);
1553 /* If we did not find our target SSA_NAME, then this is not
1554 an overflow test. */
1558 tree type = TREE_TYPE (op0);
1559 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
1560 tree inc = gimple_assign_rhs2 (op1_def);
1562 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
1564 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
1571 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
1572 OP1's defining statement to see if it ultimately has the form
1573 OP0 CODE (OP0 PLUS INTEGER_CST)
1575 If so, return TRUE indicating this is an overflow test and store into
1576 *NEW_CST an updated constant that can be used in a narrowed range test.
1578 These statements are left as-is in the IL to facilitate discovery of
1579 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
1580 the alternate range representation is often useful within VRP. */
1583 overflow_comparison_p (tree_code code, tree name, tree val,
1584 bool use_equiv_p, tree *new_cst)
1586 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
1588 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
1589 use_equiv_p, true, new_cst);
1593 /* Try to register an edge assertion for SSA name NAME on edge E for
1594 the condition COND contributing to the conditional jump pointed to by BSI.
1595 Invert the condition COND if INVERT is true. */
1598 register_edge_assert_for_2 (tree name, edge e,
1599 enum tree_code cond_code,
1600 tree cond_op0, tree cond_op1, bool invert,
1601 vec<assert_info> &asserts)
1604 enum tree_code comp_code;
1606 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
1609 invert, &comp_code, &val))
1612 /* Queue the assert. */
1614 if (overflow_comparison_p (comp_code, name, val, false, &x))
1616 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
1617 ? GT_EXPR : LE_EXPR);
1618 add_assert_info (asserts, name, name, new_code, x);
1620 add_assert_info (asserts, name, name, comp_code, val);
1622 /* In the case of NAME <= CST and NAME being defined as
1623 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
1624 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
1625 This catches range and anti-range tests. */
1626 if ((comp_code == LE_EXPR
1627 || comp_code == GT_EXPR)
1628 && TREE_CODE (val) == INTEGER_CST
1629 && TYPE_UNSIGNED (TREE_TYPE (val)))
1631 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1632 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
1634 /* Extract CST2 from the (optional) addition. */
1635 if (is_gimple_assign (def_stmt)
1636 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
1638 name2 = gimple_assign_rhs1 (def_stmt);
1639 cst2 = gimple_assign_rhs2 (def_stmt);
1640 if (TREE_CODE (name2) == SSA_NAME
1641 && TREE_CODE (cst2) == INTEGER_CST)
1642 def_stmt = SSA_NAME_DEF_STMT (name2);
1645 /* Extract NAME2 from the (optional) sign-changing cast. */
1646 if (gassign *ass = dyn_cast <gassign *> (def_stmt))
1648 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (ass))
1649 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (ass)))
1650 && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (ass)))
1651 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (ass)))))
1652 name3 = gimple_assign_rhs1 (ass);
1655 /* If name3 is used later, create an ASSERT_EXPR for it. */
1656 if (name3 != NULL_TREE
1657 && TREE_CODE (name3) == SSA_NAME
1658 && (cst2 == NULL_TREE
1659 || TREE_CODE (cst2) == INTEGER_CST)
1660 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
1664 /* Build an expression for the range test. */
1665 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
1666 if (cst2 != NULL_TREE)
1667 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1668 add_assert_info (asserts, name3, tmp, comp_code, val);
1671 /* If name2 is used later, create an ASSERT_EXPR for it. */
1672 if (name2 != NULL_TREE
1673 && TREE_CODE (name2) == SSA_NAME
1674 && TREE_CODE (cst2) == INTEGER_CST
1675 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
1679 /* Build an expression for the range test. */
1681 if (TREE_TYPE (name) != TREE_TYPE (name2))
1682 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
1683 if (cst2 != NULL_TREE)
1684 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1685 add_assert_info (asserts, name2, tmp, comp_code, val);
1689 /* In the case of post-in/decrement tests like if (i++) ... and uses
1690 of the in/decremented value on the edge, the extra name we want to
1691 assert for is not on the def chain of the name compared. Instead
1692 it is in the set of use stmts.
1693 Similar cases happen for conversions that were simplified through
1694 fold_{sign_changed,widened}_comparison. */
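/* E.g. (illustrative): for a condition i_3 != 0 and a use i_4 = i_3 + 1
   in a block dominating the edge's source, we can also record i_4 != 1 on
   the edge where i_3 != 0 holds.  */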
1695 if ((comp_code == NE_EXPR
1696 || comp_code == EQ_EXPR)
1697 && TREE_CODE (val) == INTEGER_CST)
1699 imm_use_iterator ui;
1701 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
1703 if (!is_gimple_assign (use_stmt))
1706 /* Restrict to use-stmts in blocks that dominate the predecessor. */
1707 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
1710 tree name2 = gimple_assign_lhs (use_stmt);
1711 if (TREE_CODE (name2) != SSA_NAME)
1714 enum tree_code code = gimple_assign_rhs_code (use_stmt);
1716 if (code == PLUS_EXPR
1717 || code == MINUS_EXPR)
1719 cst = gimple_assign_rhs2 (use_stmt);
1720 if (TREE_CODE (cst) != INTEGER_CST)
1722 cst = int_const_binop (code, val, cst);
1724 else if (CONVERT_EXPR_CODE_P (code))
1726 /* For truncating conversions we cannot record
1728 if (comp_code == NE_EXPR
1729 && (TYPE_PRECISION (TREE_TYPE (name2))
1730 < TYPE_PRECISION (TREE_TYPE (name))))
1732 cst = fold_convert (TREE_TYPE (name2), val);
1737 if (TREE_OVERFLOW_P (cst))
1738 cst = drop_tree_overflow (cst);
1739 add_assert_info (asserts, name2, name2, comp_code, cst);
1743 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
1744 && TREE_CODE (val) == INTEGER_CST)
1746 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1747 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
1748 tree val2 = NULL_TREE;
1749 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
1750 wide_int mask = wi::zero (prec);
1751 unsigned int nprec = prec;
1752 enum tree_code rhs_code = ERROR_MARK;
1754 if (is_gimple_assign (def_stmt))
1755 rhs_code = gimple_assign_rhs_code (def_stmt);
1757 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
1758 assert that A != CST1 -+ CST2. */
1759 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
1760 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
1762 tree op0 = gimple_assign_rhs1 (def_stmt);
1763 tree op1 = gimple_assign_rhs2 (def_stmt);
1764 if (TREE_CODE (op0) == SSA_NAME
1765 && TREE_CODE (op1) == INTEGER_CST)
1767 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
1768 ? MINUS_EXPR : PLUS_EXPR);
1769 op1 = int_const_binop (reverse_op, val, op1);
1770 if (TREE_OVERFLOW (op1))
1771 op1 = drop_tree_overflow (op1);
1772 add_assert_info (asserts, op0, op0, comp_code, op1);
1776 /* Add asserts for NAME cmp CST and NAME being defined
1777 as NAME = (int) NAME2. */
1778 if (!TYPE_UNSIGNED (TREE_TYPE (val))
1779 && (comp_code == LE_EXPR || comp_code == LT_EXPR
1780 || comp_code == GT_EXPR || comp_code == GE_EXPR)
1781 && gimple_assign_cast_p (def_stmt))
1783 name2 = gimple_assign_rhs1 (def_stmt);
1784 if (CONVERT_EXPR_CODE_P (rhs_code)
1785 && TREE_CODE (name2) == SSA_NAME
1786 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1787 && TYPE_UNSIGNED (TREE_TYPE (name2))
1788 && prec == TYPE_PRECISION (TREE_TYPE (name2))
1789 && (comp_code == LE_EXPR || comp_code == GT_EXPR
1790 || !tree_int_cst_equal (val,
1791 TYPE_MIN_VALUE (TREE_TYPE (val)))))
1794 enum tree_code new_comp_code = comp_code;
1796 cst = fold_convert (TREE_TYPE (name2),
1797 TYPE_MIN_VALUE (TREE_TYPE (val)));
1798 /* Build an expression for the range test. */
1799 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
1800 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
1801 fold_convert (TREE_TYPE (name2), val));
1802 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
1804 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
1805 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
1806 build_int_cst (TREE_TYPE (name2), 1));
1808 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
1812 /* Add asserts for NAME cmp CST and NAME being defined as
1813 NAME = NAME2 >> CST2.
1815 Extract CST2 from the right shift. */
1816 if (rhs_code == RSHIFT_EXPR)
1818 name2 = gimple_assign_rhs1 (def_stmt);
1819 cst2 = gimple_assign_rhs2 (def_stmt);
1820 if (TREE_CODE (name2) == SSA_NAME
1821 && tree_fits_uhwi_p (cst2)
1822 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1823 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
1824 && type_has_mode_precision_p (TREE_TYPE (val)))
1826 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
1827 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
1830 if (val2 != NULL_TREE
1831 && TREE_CODE (val2) == INTEGER_CST
1832 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
1836 enum tree_code new_comp_code = comp_code;
1840 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
1842 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
1844 tree type = build_nonstandard_integer_type (prec, 1);
1845 tmp = build1 (NOP_EXPR, type, name2);
1846 val2 = fold_convert (type, val2);
1848 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
1849 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
1850 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
1852 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
1855 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
1857 if (minval == wi::to_wide (new_val))
1858 new_val = NULL_TREE;
1863 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
1864 mask |= wi::to_wide (val2);
1865 if (wi::eq_p (mask, maxval))
1866 new_val = NULL_TREE;
1868 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
1872 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
1875 /* If we have a conversion that doesn't change the value of the source
1876 simply register the same assert for it. */
1877 if (CONVERT_EXPR_CODE_P (rhs_code))
1880 tree rhs1 = gimple_assign_rhs1 (def_stmt);
1881 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1882 && TREE_CODE (rhs1) == SSA_NAME
1883 /* Make sure the relation preserves the upper/lower boundary of
1884 the range conservatively. */
1885 && (comp_code == NE_EXPR
1886 || comp_code == EQ_EXPR
1887 || (TYPE_SIGN (TREE_TYPE (name))
1888 == TYPE_SIGN (TREE_TYPE (rhs1)))
1889 || ((comp_code == LE_EXPR
1890 || comp_code == LT_EXPR)
1891 && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
1892 || ((comp_code == GE_EXPR
1893 || comp_code == GT_EXPR)
1894 && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
1895 /* And the conversion does not alter the value we compare
1896 against and all values in rhs1 can be represented in
1897 the converted to type. */
1898 && int_fits_type_p (val, TREE_TYPE (rhs1))
1899 && ((TYPE_PRECISION (TREE_TYPE (name))
1900 > TYPE_PRECISION (TREE_TYPE (rhs1)))
1901 || ((get_range_query (cfun)->range_of_expr (vr, rhs1)
1902 && vr.kind () == VR_RANGE)
1903 && wi::fits_to_tree_p
1904 (widest_int::from (vr.lower_bound (),
1905 TYPE_SIGN (TREE_TYPE (rhs1))),
1907 && wi::fits_to_tree_p
1908 (widest_int::from (vr.upper_bound (),
1909 TYPE_SIGN (TREE_TYPE (rhs1))),
1910 TREE_TYPE (name)))))
1911 add_assert_info (asserts, rhs1, rhs1,
1912 comp_code, fold_convert (TREE_TYPE (rhs1), val));
1915 /* Add asserts for NAME cmp CST and NAME being defined as
1916 NAME = NAME2 & CST2.
1918 Extract CST2 from the and.
1921 NAME = (unsigned) NAME2;
1922 casts where NAME's type is unsigned and has smaller precision
1923 than NAME2's type as if it was NAME = NAME2 & MASK. */
1924 names[0] = NULL_TREE;
1925 names[1] = NULL_TREE;
1927 if (rhs_code == BIT_AND_EXPR
1928 || (CONVERT_EXPR_CODE_P (rhs_code)
1929 && INTEGRAL_TYPE_P (TREE_TYPE (val))
1930 && TYPE_UNSIGNED (TREE_TYPE (val))
1931 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
1934 name2 = gimple_assign_rhs1 (def_stmt);
1935 if (rhs_code == BIT_AND_EXPR)
1936 cst2 = gimple_assign_rhs2 (def_stmt);
1939 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
1940 nprec = TYPE_PRECISION (TREE_TYPE (name2));
1942 if (TREE_CODE (name2) == SSA_NAME
1943 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1944 && TREE_CODE (cst2) == INTEGER_CST
1945 && !integer_zerop (cst2)
1947 || TYPE_UNSIGNED (TREE_TYPE (val))))
1949 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
1950 if (gimple_assign_cast_p (def_stmt2))
1952 names[1] = gimple_assign_rhs1 (def_stmt2);
1953 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
1954 || TREE_CODE (names[1]) != SSA_NAME
1955 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
1956 || (TYPE_PRECISION (TREE_TYPE (name2))
1957 != TYPE_PRECISION (TREE_TYPE (names[1]))))
1958 names[1] = NULL_TREE;
1963 if (names[0] || names[1])
1965 wide_int minv, maxv, valv, cst2v;
1966 wide_int tem, sgnbit;
1967 bool valid_p = false, valn, cst2n;
1968 enum tree_code ccode = comp_code;
1970 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
1971 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
1972 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
1973 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
1974 /* If CST2 doesn't have most significant bit set,
1975 but VAL is negative, we have a comparison like
1976 if ((x & 0x123) > -4) (always true). Just give up. */
1980 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1982 sgnbit = wi::zero (nprec);
1983 minv = valv & cst2v;
1987 /* Minimum unsigned value for equality is VAL & CST2
1988 (should be equal to VAL, otherwise we probably should
1989 have folded the comparison into false) and
1990 maximum unsigned value is VAL | ~CST2. */
1991 maxv = valv | ~cst2v;
1996 tem = valv | ~cst2v;
1997 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
2001 sgnbit = wi::zero (nprec);
2004 /* If (VAL | ~CST2) is all ones, handle it as
2005 (X & CST2) < VAL. */
2010 sgnbit = wi::zero (nprec);
2013 if (!cst2n && wi::neg_p (cst2v))
2014 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2023 if (tem == wi::mask (nprec - 1, false, nprec))
2029 sgnbit = wi::zero (nprec);
2034 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
2035 is VAL and maximum unsigned value is ~0. For signed
2036 comparison, if CST2 doesn't have most significant bit
2037 set, handle it similarly. If CST2 has MSB set,
2038 the minimum is the same, and maximum is ~0U/2. */
2041 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
2043 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2047 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2053 /* Find out smallest MINV where MINV > VAL
2054 && (MINV & CST2) == MINV, if any. If VAL is signed and
2055 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
2056 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2059 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2064 /* Minimum unsigned value for <= is 0 and maximum
2065 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
2066 Otherwise, find smallest VAL2 where VAL2 > VAL
2067 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2069 For signed comparison, if CST2 doesn't have most
2070 significant bit set, handle it similarly. If CST2 has
2071 MSB set, the maximum is the same and minimum is INT_MIN. */
2076 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2088 /* Minimum unsigned value for < is 0 and maximum
2089 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
2090 Otherwise, find smallest VAL2 where VAL2 > VAL
2091 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2093 For signed comparison, if CST2 doesn't have most
2094 significant bit set, handle it similarly. If CST2 has
2095 MSB set, the maximum is the same and minimum is INT_MIN. */
2104 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2118 && (maxv - minv) != -1)
2120 tree tmp, new_val, type;
2123 for (i = 0; i < 2; i++)
2126 wide_int maxv2 = maxv;
2128 type = TREE_TYPE (names[i]);
2129 if (!TYPE_UNSIGNED (type))
2131 type = build_nonstandard_integer_type (nprec, 1);
2132 tmp = build1 (NOP_EXPR, type, names[i]);
2136 tmp = build2 (PLUS_EXPR, type, tmp,
2137 wide_int_to_tree (type, -minv));
2138 maxv2 = maxv - minv;
2140 new_val = wide_int_to_tree (type, maxv2);
2141 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
2148 /* OP is an operand of a truth value expression which is known to have
2149 a particular value. Register any asserts for OP and for any
2150 operands in OP's defining statement.
2152 If CODE is EQ_EXPR, then we want to register OP is zero (false),
2153 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
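/* E.g. (illustrative): if OP is _5 defined by _5 = a_2 < b_3 and CODE is
   NE_EXPR (the edge where _5 is true), asserts are also registered for a_2
   and b_3 from the comparison a_2 < b_3, without inversion.  */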
2156 register_edge_assert_for_1 (tree op, enum tree_code code,
2157 edge e, vec<assert_info> &asserts)
2161 enum tree_code rhs_code;
2163 /* We only care about SSA_NAMEs. */
2164 if (TREE_CODE (op) != SSA_NAME)
2167 /* We know that OP will have a zero or nonzero value. */
2168 val = build_int_cst (TREE_TYPE (op), 0);
2169 add_assert_info (asserts, op, op, code, val);
2171 /* Now look at how OP is set. If it's set from a comparison,
2172 a truth operation or some bit operations, then we may be able
2173 to register information about the operands of that assignment. */
2174 op_def = SSA_NAME_DEF_STMT (op);
2175 if (gimple_code (op_def) != GIMPLE_ASSIGN)
2178 rhs_code = gimple_assign_rhs_code (op_def);
2180 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
2182 bool invert = (code == EQ_EXPR ? true : false);
2183 tree op0 = gimple_assign_rhs1 (op_def);
2184 tree op1 = gimple_assign_rhs2 (op_def);
2186 if (TREE_CODE (op0) == SSA_NAME)
2187 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
2188 if (TREE_CODE (op1) == SSA_NAME)
2189 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
2191 else if ((code == NE_EXPR
2192 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
2194 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
2196 /* Recurse on each operand. */
2197 tree op0 = gimple_assign_rhs1 (op_def);
2198 tree op1 = gimple_assign_rhs2 (op_def);
2199 if (TREE_CODE (op0) == SSA_NAME
2200 && has_single_use (op0))
2201 register_edge_assert_for_1 (op0, code, e, asserts);
2202 if (TREE_CODE (op1) == SSA_NAME
2203 && has_single_use (op1))
2204 register_edge_assert_for_1 (op1, code, e, asserts);
2206 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
2207 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
2209 /* Recurse, flipping CODE. */
2210 code = invert_tree_comparison (code, false);
2211 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2213 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
2215 /* Recurse through the copy. */
2216 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2218 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
2220 /* Recurse through the type conversion, unless it is a narrowing
2221 conversion or conversion from non-integral type. */
2222 tree rhs = gimple_assign_rhs1 (op_def);
2223 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2224 && (TYPE_PRECISION (TREE_TYPE (rhs))
2225 <= TYPE_PRECISION (TREE_TYPE (op))))
2226 register_edge_assert_for_1 (rhs, code, e, asserts);
2230 /* Check if comparison
2231 NAME COND_OP INTEGER_CST
2233 has the form (X & 11...100..0) COND_OP XX...X00...0.
2234 Such a comparison can yield assertions like
2237 X >= XX...X00...0 && X <= XX...X11...1 in case of COND_OP being EQ_EXPR, or
2240 X < XX...X00...0 || X > XX...X11...1 in case of NE_EXPR. */
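/* Illustrative example with hypothetical values: for x_1 = y_2 & 0xf8
and the condition x_1 == 0x10, *NEW_NAME is set to y_2, *LOW to 0x10
with *LOW_CODE == GE_EXPR, and *HIGH to 0x17 (0x10 | ~0xf8) with
*HIGH_CODE == LE_EXPR. */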
2243 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
2244 tree *new_name, tree *low, enum tree_code *low_code,
2245 tree *high, enum tree_code *high_code)
2247 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2249 if (!is_gimple_assign (def_stmt)
2250 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
2253 tree t = gimple_assign_rhs1 (def_stmt);
2254 tree maskt = gimple_assign_rhs2 (def_stmt);
2255 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
2258 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
2259 wide_int inv_mask = ~mask;
2260 /* A degenerate mask (all zeros or all ones) must have been folded away by now, so don't bother optimizing. */
2261 if (mask == 0 || inv_mask == 0)
2264 /* Assume VALT is INTEGER_CST. */
2265 wi::tree_to_wide_ref val = wi::to_wide (valt);
2267 if ((inv_mask & (inv_mask + 1)) != 0
2268 || (val & mask) != val)
2271 bool is_range = cond_code == EQ_EXPR;
2273 tree type = TREE_TYPE (t);
2274 wide_int min = wi::min_value (type),
2275 max = wi::max_value (type);
2279 *low_code = val == min ? ERROR_MARK : GE_EXPR;
2280 *high_code = val == max ? ERROR_MARK : LE_EXPR;
2284 /* We can still generate an assertion if one of the alternatives
2285 is known to always be false. */
2288 *low_code = (enum tree_code) 0;
2289 *high_code = GT_EXPR;
2291 else if ((val | inv_mask) == max)
2293 *low_code = LT_EXPR;
2294 *high_code = (enum tree_code) 0;
2301 *low = wide_int_to_tree (type, val);
2302 *high = wide_int_to_tree (type, val | inv_mask);
2307 /* Try to register an edge assertion for SSA name NAME on edge E for
2308 the condition COND contributing to the conditional jump pointed to by
2312 register_edge_assert_for (tree name, edge e,
2313 enum tree_code cond_code, tree cond_op0,
2314 tree cond_op1, vec<assert_info> &asserts)
2317 enum tree_code comp_code;
2318 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
2320 /* Do not attempt to infer anything in names that flow through
2322 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
2325 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2331 /* Register ASSERT_EXPRs for name. */
2332 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
2333 cond_op1, is_else_edge, asserts);
2336 /* If COND is effectively an equality test of an SSA_NAME against
2337 the value zero or one, then we may be able to assert values
2338 for SSA_NAMEs which flow into COND. */
2340 /* In the case of NAME == 1 or NAME != 0, if the defining statement
2341 of NAME is a BIT_AND_EXPR, we can assert that both operands of the
2342 BIT_AND_EXPR have nonzero values. */
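/* E.g. (hypothetical names): on an edge where x_1 != 0 holds and
x_1 = a_2 & b_3, a nonzero AND result implies both inputs are nonzero,
so a_2 != 0 and b_3 != 0 are registered. */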
2343 if ((comp_code == EQ_EXPR && integer_onep (val))
2344 || (comp_code == NE_EXPR && integer_zerop (val)))
2346 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2348 if (is_gimple_assign (def_stmt)
2349 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
2351 tree op0 = gimple_assign_rhs1 (def_stmt);
2352 tree op1 = gimple_assign_rhs2 (def_stmt);
2353 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
2354 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
2356 else if (is_gimple_assign (def_stmt)
2357 && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2359 register_edge_assert_for_1 (name, NE_EXPR, e, asserts);
2362 /* In the case of NAME == 0 or NAME != 1, if the defining statement of NAME
2363 is a BIT_IOR_EXPR, we can assert that both operands of the BIT_IOR_EXPR have zero values. */
2365 if ((comp_code == EQ_EXPR && integer_zerop (val))
2366 || (comp_code == NE_EXPR
2367 && integer_onep (val)
2368 && TYPE_PRECISION (TREE_TYPE (name)) == 1))
2370 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2372 /* For a BIT_IOR_EXPR, both operands are necessarily zero only
2373 if NAME == 0, or if the type precision is one. */
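/* E.g. (hypothetical names): on an edge where x_1 == 0 holds and
x_1 = a_2 | b_3, a zero OR result implies both inputs are zero,
so a_2 == 0 and b_3 == 0 are registered. */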
2374 if (is_gimple_assign (def_stmt)
2375 && gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
2377 tree op0 = gimple_assign_rhs1 (def_stmt);
2378 tree op1 = gimple_assign_rhs2 (def_stmt);
2379 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
2380 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
2382 else if (is_gimple_assign (def_stmt)
2383 && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2385 register_edge_assert_for_1 (name, EQ_EXPR, e, asserts);
2388 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
2389 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2390 && TREE_CODE (val) == INTEGER_CST)
2392 enum tree_code low_code, high_code;
2394 if (is_masked_range_test (name, val, comp_code, &name, &low,
2395 &low_code, &high, &high_code))
2397 if (low_code != ERROR_MARK)
2398 register_edge_assert_for_2 (name, e, low_code, name,
2399 low, /*invert*/false, asserts);
2400 if (high_code != ERROR_MARK)
2401 register_edge_assert_for_2 (name, e, high_code, name,
2402 high, /*invert*/false, asserts);
2414 __builtin_unreachable ();
2416 x_5 = ASSERT_EXPR <x_3, ...>;
2417 If x_3 has no other immediate uses (checked by caller),
2418 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
2419 from the non-zero bitmask. */
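/* Illustrative sketch (hypothetical names):
_1 = x_3 & 31;
if (_1 != 0)
__builtin_unreachable ();
The surviving edge implies x_3 & 31 == 0, so the low five bits can be
cleared from x_3's nonzero-bits mask. */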
2422 maybe_set_nonzero_bits (edge e, tree var)
2424 basic_block cond_bb = e->src;
2425 gimple *stmt = last_stmt (cond_bb);
2429 || gimple_code (stmt) != GIMPLE_COND
2430 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
2431 ? EQ_EXPR : NE_EXPR)
2432 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
2433 || !integer_zerop (gimple_cond_rhs (stmt)))
2436 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2437 if (!is_gimple_assign (stmt)
2438 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
2439 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
2441 if (gimple_assign_rhs1 (stmt) != var)
2445 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
2447 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
2448 if (!gimple_assign_cast_p (stmt2)
2449 || gimple_assign_rhs1 (stmt2) != var
2450 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
2451 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
2452 != TYPE_PRECISION (TREE_TYPE (var))))
2455 cst = gimple_assign_rhs2 (stmt);
2456 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
2457 wi::to_wide (cst)));
2460 /* Return true if STMT is interesting for VRP. */
2463 stmt_interesting_for_vrp (gimple *stmt)
2465 if (gimple_code (stmt) == GIMPLE_PHI)
2467 tree res = gimple_phi_result (stmt);
2468 return (!virtual_operand_p (res)
2469 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
2470 || POINTER_TYPE_P (TREE_TYPE (res))));
2472 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
2474 tree lhs = gimple_get_lhs (stmt);
2476 /* In general, assignments with virtual operands are not useful
2477 for deriving ranges, with the obvious exception of calls to
2478 builtin functions. */
2479 if (lhs && TREE_CODE (lhs) == SSA_NAME
2480 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
2481 || POINTER_TYPE_P (TREE_TYPE (lhs)))
2482 && (is_gimple_call (stmt)
2483 || !gimple_vuse (stmt)))
2485 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
2486 switch (gimple_call_internal_fn (stmt))
2488 case IFN_ADD_OVERFLOW:
2489 case IFN_SUB_OVERFLOW:
2490 case IFN_MUL_OVERFLOW:
2491 case IFN_ATOMIC_COMPARE_EXCHANGE:
2492 /* These internal calls return _Complex integer type,
2493 but are interesting to VRP nevertheless. */
2494 if (lhs && TREE_CODE (lhs) == SSA_NAME)
2501 else if (gimple_code (stmt) == GIMPLE_COND
2502 || gimple_code (stmt) == GIMPLE_SWITCH)
2508 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
2509 that includes the value VAL. The search is restricted to the range
2510 [START_IDX, n - 1] where n is the size of VEC.
2512 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
2515 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
2516 it is placed in IDX and false is returned.
2518 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
2522 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
2524 size_t n = gimple_switch_num_labels (stmt);
2527 /* Find case label for minimum of the value range or the next one.
2528 At each iteration we are searching in [low, high - 1]. */
2530 for (low = start_idx, high = n; high != low; )
2534 /* Note that i != high, so we never ask for n. */
2535 size_t i = (high + low) / 2;
2536 t = gimple_switch_label (stmt, i);
2538 /* Cache the result of comparing CASE_LOW and val. */
2539 cmp = tree_int_cst_compare (CASE_LOW (t), val);
2543 /* Ranges cannot be empty. */
2552 if (CASE_HIGH (t) != NULL
2553 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
2565 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
2566 for values between MIN and MAX. The first index is placed in MIN_IDX. The
2567 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
2568 then MAX_IDX < MIN_IDX.
2569 Returns true if the default label is not needed. */
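/* Illustrative example (hypothetical labels): for a switch with case
labels 3, 4 and 5 ... 7 and the query range [4, 6], *MIN_IDX and
*MAX_IDX delimit the labels 4 and 5 ... 7; because those labels are
contiguous, the default label is not needed and true is returned. */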
2572 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
2576 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
2577 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
2581 && max_take_default)
2583 /* Only the default case label reached.
2584 Return an empty range. */
2591 bool take_default = min_take_default || max_take_default;
2595 if (max_take_default)
2598 /* If the case label range is continuous, we do not need
2599 the default case label. Verify that. */
2600 high = CASE_LOW (gimple_switch_label (stmt, i));
2601 if (CASE_HIGH (gimple_switch_label (stmt, i)))
2602 high = CASE_HIGH (gimple_switch_label (stmt, i));
2603 for (k = i + 1; k <= j; ++k)
2605 low = CASE_LOW (gimple_switch_label (stmt, k));
2606 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
2608 take_default = true;
2612 if (CASE_HIGH (gimple_switch_label (stmt, k)))
2613 high = CASE_HIGH (gimple_switch_label (stmt, k));
2618 return !take_default;
2622 /* Given a SWITCH_STMT, return the case label that encompasses the
2623 known possible values for the switch operand. RANGE_OF_OP is a
2624 range for the known values of the switch operand. */
2627 find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
2629 if (range_of_op->undefined_p ()
2630 || range_of_op->varying_p ()
2631 || range_of_op->symbolic_p ())
2635 tree op = gimple_switch_index (switch_stmt);
2636 tree type = TREE_TYPE (op);
2637 tree tmin = wide_int_to_tree (type, range_of_op->lower_bound ());
2638 tree tmax = wide_int_to_tree (type, range_of_op->upper_bound ());
2639 find_case_label_range (switch_stmt, tmin, tmax, &i, &j);
2642 /* Look for exactly one label that encompasses the range of
2644 tree label = gimple_switch_label (switch_stmt, i);
2646 = CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
2647 int_range_max label_range (CASE_LOW (label), case_high);
2648 if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2649 range_cast (label_range, range_of_op->type ());
2650 label_range.intersect (*range_of_op);
2651 if (label_range == *range_of_op)
2656 /* If there are no labels at all, take the default. */
2657 return gimple_switch_label (switch_stmt, 0);
2661 /* Otherwise, there are various labels that can encompass
2662 the range of the operand. In that case, see if the range of
2663 the operand is entirely *outside* the bounds of all the
2664 (non-default) case labels. If so, take the default. */
2665 unsigned n = gimple_switch_num_labels (switch_stmt);
2666 tree min_label = gimple_switch_label (switch_stmt, 1);
2667 tree max_label = gimple_switch_label (switch_stmt, n - 1);
2668 tree case_high = CASE_HIGH (max_label);
2670 case_high = CASE_LOW (max_label);
2671 int_range_max label_range (CASE_LOW (min_label), case_high);
2672 if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2673 range_cast (label_range, range_of_op->type ());
2674 label_range.intersect (*range_of_op);
2675 if (label_range.undefined_p ())
2676 return gimple_switch_label (switch_stmt, 0);
2687 /* Location information for ASSERT_EXPRs. Each instance of this
2688 structure describes an ASSERT_EXPR for an SSA name. Since a single
2689 SSA name may have more than one assertion associated with it, these
2690 locations are kept in a linked list attached to the corresponding
2694 /* Basic block where the assertion would be inserted. */
2697 /* Some assertions need to be inserted on an edge (e.g., assertions
2698 generated by COND_EXPRs). In those cases, BB will be NULL. */
2701 /* Pointer to the statement that generated this assertion. */
2702 gimple_stmt_iterator si;
2704 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
2705 enum tree_code comp_code;
2707 /* Value being compared against. */
2710 /* Expression to compare. */
2713 /* Next node in the linked list. */
2717 /* Class to traverse the flowgraph looking for conditional jumps to
2718 insert ASSERT_EXPR range expressions. These range expressions are
2719 meant to provide information to optimizations that need to reason
2720 in terms of value ranges. They will not be expanded into RTL. */
2725 vrp_asserts (struct function *fn) : fun (fn) { }
2727 void insert_range_assertions ();
2729 /* Convert range assertion expressions into the implied copies and
2730 copy propagate away the copies. */
2731 void remove_range_assertions ();
2733 /* Dump all the registered assertions for all the names to FILE. */
2736 /* Dump all the registered assertions for NAME to FILE. */
2737 void dump (FILE *file, tree name);
2739 /* Dump all the registered assertions for NAME to stderr. */
2740 void debug (tree name)
2742 dump (stderr, name);
2745 /* Dump all the registered assertions for all the names to stderr. */
2752 /* Set of SSA names found live during the RPO traversal of the function
2753 for still active basic-blocks. */
2756 /* Function to work on. */
2757 struct function *fun;
2759 /* If bit I is present, it means that SSA name N_i has a list of
2760 assertions that should be inserted in the IL. */
2761 bitmap need_assert_for;
2763 /* Array of location lists describing where to insert assertions. ASSERTS_FOR[I]
2764 holds a list of ASSERT_LOCUS_T nodes that describe where
2765 ASSERT_EXPRs for SSA name N_I should be inserted. */
2766 assert_locus **asserts_for;
2768 /* Finish found ASSERTS for E and register them at GSI. */
2769 void finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
2770 vec<assert_info> &asserts);
2772 /* Determine whether the outgoing edges of BB should receive an
2773 ASSERT_EXPR for each of the operands of BB's LAST statement. The
2774 last statement of BB must be a SWITCH_EXPR.
2776 If any of the sub-graphs rooted at BB have an interesting use of
2777 the predicate operands, an assert location node is added to the
2778 list of assertions for the corresponding operands. */
2779 void find_switch_asserts (basic_block bb, gswitch *last);
2781 /* Do an RPO walk over the function computing SSA name liveness
2782 on-the-fly and deciding on assert expressions to insert. */
2783 void find_assert_locations ();
2785 /* Traverse all the statements in block BB looking for statements that
2786 may generate useful assertions for the SSA names in their operands.
2787 See the method implementation commentary for more information. */
2788 void find_assert_locations_in_bb (basic_block bb);
2790 /* Determine whether the outgoing edges of BB should receive an
2791 ASSERT_EXPR for each of the operands of BB's LAST statement.
2792 The last statement of BB must be a COND_EXPR.
2794 If any of the sub-graphs rooted at BB have an interesting use of
2795 the predicate operands, an assert location node is added to the
2796 list of assertions for the corresponding operands. */
2797 void find_conditional_asserts (basic_block bb, gcond *last);
2799 /* Process all the insertions registered for every name N_i registered
2800 in NEED_ASSERT_FOR. The list of assertions to be inserted are
2801 found in ASSERTS_FOR[i]. */
2802 void process_assert_insertions ();
2804 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2805 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2806 E->DEST, then register this location as a possible insertion point
2807 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2809 BB, E and SI provide the exact insertion point for the new
2810 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2811 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2812 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2813 must not be NULL. */
2814 void register_new_assert_for (tree name, tree expr,
2815 enum tree_code comp_code,
2816 tree val, basic_block bb,
2817 edge e, gimple_stmt_iterator si);
2819 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2820 create a new SSA name N and return the assertion assignment
2821 'N = ASSERT_EXPR <V, V OP W>'. */
2822 gimple *build_assert_expr_for (tree cond, tree v);
2824 /* Create an ASSERT_EXPR for NAME and insert it in the location
2825 indicated by LOC. Return true if we made any edge insertions. */
2826 bool process_assert_insertions_for (tree name, assert_locus *loc);
2828 /* Qsort callback for sorting assert locations. */
2829 template <bool stable> static int compare_assert_loc (const void *,
2832 /* Return true if STMT is a predicate expression involving floating point values. */
2834 bool fp_predicate (gimple *stmt)
2836 GIMPLE_CHECK (stmt, GIMPLE_COND);
2837 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2840 bool all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt,
2841 basic_block cond_bb);
2843 static int compare_case_labels (const void *, const void *);
2846 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2847 create a new SSA name N and return the assertion assignment
2848 'N = ASSERT_EXPR <V, V OP W>'. */
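/* E.g. (hypothetical names): for V = x_3 and COND = x_3 > 10, a fresh
name such as x_7 is created and the returned assignment is
x_7 = ASSERT_EXPR <x_3, x_3 > 10>. */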
2851 vrp_asserts::build_assert_expr_for (tree cond, tree v)
2856 gcc_assert (TREE_CODE (v) == SSA_NAME
2857 && COMPARISON_CLASS_P (cond));
2859 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2860 assertion = gimple_build_assign (NULL_TREE, a);
2862 /* The new ASSERT_EXPR creates a new SSA name that replaces the
2863 operand of the ASSERT_EXPR. Create it so the new name and the old one
2864 are registered in the replacement table so that we can fix the SSA web
2865 after adding all the ASSERT_EXPRs. */
2866 tree new_def = create_new_def_for (v, assertion, NULL);
2867 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2868 given we have to be able to fully propagate those out to re-create
2869 valid SSA when removing the asserts. */
2870 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2871 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2876 /* Dump all the registered assertions for NAME to FILE. */
2879 vrp_asserts::dump (FILE *file, tree name)
2883 fprintf (file, "Assertions to be inserted for ");
2884 print_generic_expr (file, name);
2885 fprintf (file, "\n");
2887 loc = asserts_for[SSA_NAME_VERSION (name)];
2890 fprintf (file, "\t");
2891 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2892 fprintf (file, "\n\tBB #%d", loc->bb->index);
2895 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2896 loc->e->dest->index);
2897 dump_edge_info (file, loc->e, dump_flags, 0);
2899 fprintf (file, "\n\tPREDICATE: ");
2900 print_generic_expr (file, loc->expr);
2901 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2902 print_generic_expr (file, loc->val);
2903 fprintf (file, "\n\n");
2907 fprintf (file, "\n");
2910 /* Dump all the registered assertions for all the names to FILE. */
2913 vrp_asserts::dump (FILE *file)
2918 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2919 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2920 dump (file, ssa_name (i));
2921 fprintf (file, "\n");
2924 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2925 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2926 E->DEST, then register this location as a possible insertion point
2927 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2929 BB, E and SI provide the exact insertion point for the new
2930 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2931 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2932 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2933 must not be NULL. */
2936 vrp_asserts::register_new_assert_for (tree name, tree expr,
2937 enum tree_code comp_code,
2941 gimple_stmt_iterator si)
2943 assert_locus *n, *loc, *last_loc;
2944 basic_block dest_bb;
2946 gcc_checking_assert (bb == NULL || e == NULL);
2949 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2950 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2952 /* Never build an assert comparing against an integer constant with
2953 TREE_OVERFLOW set. This confuses our undefined overflow warning
2955 if (TREE_OVERFLOW_P (val))
2956 val = drop_tree_overflow (val);
2958 /* The new assertion A will be inserted at BB or E. We need to
2959 determine if the new location is dominated by a previously
2960 registered location for A. If we are doing an edge insertion,
2961 assume that A will be inserted at E->DEST. Note that this is not
2964 If E is a critical edge, it will be split. But even if E is
2965 split, the new block will dominate the same set of blocks that
2968 The reverse, however, is not true: blocks dominated by E->DEST
2969 will not be dominated by the new block created to split E. So,
2970 if the insertion location is on a critical edge, we will not use
2971 the new location to move another assertion previously registered
2972 at a block dominated by E->DEST. */
2973 dest_bb = (bb) ? bb : e->dest;
2975 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2976 VAL at a block dominating DEST_BB, then we don't need to insert a new
2977 one. Similarly, if the same assertion already exists at a block
2978 dominated by DEST_BB and the new location is not on a critical
2979 edge, then update the existing location for the assertion (i.e.,
2980 move the assertion up in the dominance tree).
2982 Note, this is implemented as a simple linked list because there
2983 should not be more than a handful of assertions registered per
2984 name. If this becomes a performance problem, a table hashed by
2985 COMP_CODE and VAL could be implemented. */
2986 loc = asserts_for[SSA_NAME_VERSION (name)];
2990 if (loc->comp_code == comp_code
2992 || operand_equal_p (loc->val, val, 0))
2993 && (loc->expr == expr
2994 || operand_equal_p (loc->expr, expr, 0)))
2996 /* If E is not a critical edge and DEST_BB
2997 dominates the existing location for the assertion, move
2998 the assertion up in the dominance tree by updating its
2999 location information. */
3000 if ((e == NULL || !EDGE_CRITICAL_P (e))
3001 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
3010 /* Update the last node of the list and move to the next one. */
3015 /* If we didn't find an assertion already registered for
3016 NAME COMP_CODE VAL, add a new one at the end of the list of
3017 assertions associated with NAME. */
3018 n = XNEW (struct assert_locus);
3022 n->comp_code = comp_code;
3030 asserts_for[SSA_NAME_VERSION (name)] = n;
3032 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
3035 /* Finish found ASSERTS for E and register them at GSI. */
3038 vrp_asserts::finish_register_edge_assert_for (edge e,
3039 gimple_stmt_iterator gsi,
3040 vec<assert_info> &asserts)
3042 for (unsigned i = 0; i < asserts.length (); ++i)
3043 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3044 reachable from E. */
3045 if (live.live_on_edge_p (asserts[i].name, e))
3046 register_new_assert_for (asserts[i].name, asserts[i].expr,
3047 asserts[i].comp_code, asserts[i].val,
3051 /* Determine whether the outgoing edges of BB should receive an
3052 ASSERT_EXPR for each of the operands of BB's LAST statement.
3053 The last statement of BB must be a COND_EXPR.
3055 If any of the sub-graphs rooted at BB have an interesting use of
3056 the predicate operands, an assert location node is added to the
3057 list of assertions for the corresponding operands. */
3060 vrp_asserts::find_conditional_asserts (basic_block bb, gcond *last)
3062 gimple_stmt_iterator bsi;
3068 bsi = gsi_for_stmt (last);
3070 /* Look for uses of the operands in each of the sub-graphs
3071 rooted at BB. We need to check each of the outgoing edges
3072 separately, so that we know what kind of ASSERT_EXPR to
3074 FOR_EACH_EDGE (e, ei, bb->succs)
3079 /* Register the necessary assertions for each operand in the
3080 conditional predicate. */
3081 auto_vec<assert_info, 8> asserts;
3082 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3083 register_edge_assert_for (op, e,
3084 gimple_cond_code (last),
3085 gimple_cond_lhs (last),
3086 gimple_cond_rhs (last), asserts);
3087 finish_register_edge_assert_for (e, bsi, asserts);
3091 /* Compare two case labels sorting first by the destination bb index
3092 and then by the case value. */
3095 vrp_asserts::compare_case_labels (const void *p1, const void *p2)
3097 const struct case_info *ci1 = (const struct case_info *) p1;
3098 const struct case_info *ci2 = (const struct case_info *) p2;
3099 int idx1 = ci1->bb->index;
3100 int idx2 = ci2->bb->index;
3104 else if (idx1 == idx2)
3106 /* Make sure the default label is first in a group. */
3107 if (!CASE_LOW (ci1->expr))
3109 else if (!CASE_LOW (ci2->expr))
3112 return tree_int_cst_compare (CASE_LOW (ci1->expr),
3113 CASE_LOW (ci2->expr));
3119 /* Determine whether the outgoing edges of BB should receive an
3120 ASSERT_EXPR for each of the operands of BB's LAST statement.
3121 The last statement of BB must be a SWITCH_EXPR.
3123 If any of the sub-graphs rooted at BB have an interesting use of
3124 the predicate operands, an assert location node is added to the
3125 list of assertions for the corresponding operands. */
3128 vrp_asserts::find_switch_asserts (basic_block bb, gswitch *last)
3130 gimple_stmt_iterator bsi;
3133 struct case_info *ci;
3134 size_t n = gimple_switch_num_labels (last);
3135 #if GCC_VERSION >= 4000
3138 /* Work around GCC 3.4 bug (PR 37086). */
3139 volatile unsigned int idx;
3142 bsi = gsi_for_stmt (last);
3143 op = gimple_switch_index (last);
3144 if (TREE_CODE (op) != SSA_NAME)
3147 /* Build a vector of case labels sorted by destination label. */
3148 ci = XNEWVEC (struct case_info, n);
3149 for (idx = 0; idx < n; ++idx)
3151 ci[idx].expr = gimple_switch_label (last, idx);
3152 ci[idx].bb = label_to_block (fun, CASE_LABEL (ci[idx].expr));
3154 edge default_edge = find_edge (bb, ci[0].bb);
3155 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3157 for (idx = 0; idx < n; ++idx)
3160 tree cl = ci[idx].expr;
3161 basic_block cbb = ci[idx].bb;
3163 min = CASE_LOW (cl);
3164 max = CASE_HIGH (cl);
3166 /* If there are multiple case labels with the same destination
3167 we need to combine them to a single value range for the edge. */
3168 if (idx + 1 < n && cbb == ci[idx + 1].bb)
3170 /* Skip labels until the last of the group. */
3173 } while (idx < n && cbb == ci[idx].bb);
3176 /* Pick up the maximum of the case label range. */
3177 if (CASE_HIGH (ci[idx].expr))
3178 max = CASE_HIGH (ci[idx].expr);
3180 max = CASE_LOW (ci[idx].expr);
3183 /* Can't extract a useful assertion out of a range that includes the
3185 if (min == NULL_TREE)
3188 /* Find the edge to register the assert expr on. */
3189 e = find_edge (bb, cbb);
3191 /* Register the necessary assertions for the operand in the
3193 auto_vec<assert_info, 8> asserts;
3194 register_edge_assert_for (op, e,
3195 max ? GE_EXPR : EQ_EXPR,
3196 op, fold_convert (TREE_TYPE (op), min),
3199 register_edge_assert_for (op, e, LE_EXPR, op,
3200 fold_convert (TREE_TYPE (op), max),
3202 finish_register_edge_assert_for (e, bsi, asserts);
3207 if (!live.live_on_edge_p (op, default_edge))
3210 /* Now, along the default label, register assertions that correspond to the
3211 anti-range of each label. */
3212 int insertion_limit = param_max_vrp_switch_assertions;
3213 if (insertion_limit == 0)
3216 /* We can't do this if the default case shares a label with another case. */
3217 tree default_cl = gimple_switch_default_label (last);
3218 for (idx = 1; idx < n; idx++)
3221 tree cl = gimple_switch_label (last, idx);
3222 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3225 min = CASE_LOW (cl);
3226 max = CASE_HIGH (cl);
3228 /* Combine contiguous case ranges to reduce the number of assertions
3230 for (idx = idx + 1; idx < n; idx++)
3232 tree next_min, next_max;
3233 tree next_cl = gimple_switch_label (last, idx);
3234 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3237 next_min = CASE_LOW (next_cl);
3238 next_max = CASE_HIGH (next_cl);
3240 wide_int difference = (wi::to_wide (next_min)
3241 - wi::to_wide (max ? max : min));
3242 if (wi::eq_p (difference, 1))
3243 max = next_max ? next_max : next_min;
3249 if (max == NULL_TREE)
3251 /* Register the assertion OP != MIN. */
3252 auto_vec<assert_info, 8> asserts;
3253 min = fold_convert (TREE_TYPE (op), min);
3254 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3256 finish_register_edge_assert_for (default_edge, bsi, asserts);
3260 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3261 which will give OP the anti-range ~[MIN,MAX]. */
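/* Illustrative example (hypothetical values): for a case range 5 ... 7
handled elsewhere, the assertion registered on the default edge is
(unsigned) OP - 5 > 2, which holds exactly when OP lies outside
[5, 7]. */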
3262 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3263 min = fold_convert (TREE_TYPE (uop), min);
3264 max = fold_convert (TREE_TYPE (uop), max);
3266 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3267 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3268 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3269 NULL, default_edge, bsi);
3272 if (--insertion_limit == 0)
3277 /* Traverse all the statements in block BB looking for statements that
3278 may generate useful assertions for the SSA names in their operands.
3279 If a statement produces a useful assertion A for name N_i, then the
3280 list of assertions already generated for N_i is scanned to
3281 determine if A is actually needed.
3283 If N_i already had the assertion A at a location dominating the
3284 current location, then nothing needs to be done. Otherwise, the
3285 new location for A is recorded instead.
3287 1- For every statement S in BB, all the variables used by S are
3288 added to bitmap FOUND_IN_SUBGRAPH.
3290 2- If statement S uses an operand N in a way that exposes a known
3291 value range for N, then if N was not already generated by an
3292 ASSERT_EXPR, create a new assert location for N. For instance,
3293 if N is a pointer and the statement dereferences it, we can
3294 assume that N is not NULL.
3296 3- COND_EXPRs are a special case of #2. We can derive range
3297 information from the predicate but need to insert different
3298 ASSERT_EXPRs for each of the sub-graphs rooted at the
3299 conditional block. If the last statement of BB is a conditional
3300 expression of the form 'X op Y', then
3302 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3304 b) If the conditional is the only entry point to the sub-graph
3305 corresponding to the THEN_CLAUSE, recurse into it. On
3306 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3307 an ASSERT_EXPR is added for the corresponding variable.
3309 c) Repeat step (b) on the ELSE_CLAUSE.
3311 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3320 For a conditional such as 'if (a == 9)', an assertion on the THEN
3321 clause is useful to determine that 'a' is always 9 on that edge.
3322 However, an assertion on the ELSE clause would be unnecessary.
3324 4- If BB does not end in a conditional expression, then we recurse
3325 into BB's dominator children.
3327 At the end of the recursive traversal, every SSA name will have a
3328 list of locations where ASSERT_EXPRs should be added. When a new
3329 location for name N is found, it is registered by calling
3330 register_new_assert_for. That function keeps track of all the
3331 registered assertions to prevent adding unnecessary assertions.
3332 For instance, if a pointer P_4 is dereferenced more than once in a
3333 dominator tree, only the location dominating all the dereference of
3334 P_4 will receive an ASSERT_EXPR. */
3337 vrp_asserts::find_assert_locations_in_bb (basic_block bb)
3341 last = last_stmt (bb);
3343 /* If BB's last statement is a conditional statement involving integer
3344 operands, determine if we need to add ASSERT_EXPRs. */
3346 && gimple_code (last) == GIMPLE_COND
3347 && !fp_predicate (last)
3348 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3349 find_conditional_asserts (bb, as_a <gcond *> (last));
3351 /* If BB's last statement is a switch statement involving integer
3352 operands, determine if we need to add ASSERT_EXPRs. */
3354 && gimple_code (last) == GIMPLE_SWITCH
3355 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3356 find_switch_asserts (bb, as_a <gswitch *> (last));
3358 /* Traverse all the statements in BB marking used names and looking
3359 for statements that may infer assertions for their used operands. */
3360 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3367 stmt = gsi_stmt (si);
3369 if (is_gimple_debug (stmt))
3372 /* See if we can derive an assertion for any of STMT's operands. */
3373 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3376 enum tree_code comp_code;
3378 /* If op is not live beyond this stmt, do not bother to insert
3380 if (!live.live_on_block_p (op, bb))
3383 /* If OP is used in such a way that we can infer a value
3384 range for it, and we don't find a previous assertion for
3385 it, create a new assertion location node for OP. */
3386 if (infer_value_range (stmt, op, &comp_code, &value))
3388 /* If we are able to infer a nonzero value range for OP,
3389 then walk backwards through the use-def chain to see if OP
3390 was set via a typecast.
3392 If so, then we can also infer a nonzero value range
3393 for the operand of the NOP_EXPR. */
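/* E.g. (hypothetical names): if *p_2 is dereferenced here and p_2 was
set by p_2 = (struct foo *) q_1, we register p_2 != 0 and, walking
back through the conversion, also q_1 != 0. */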
3394 if (comp_code == NE_EXPR && integer_zerop (value))
3397 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3399 while (is_gimple_assign (def_stmt)
3400 && CONVERT_EXPR_CODE_P
3401 (gimple_assign_rhs_code (def_stmt))
3403 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3405 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3407 t = gimple_assign_rhs1 (def_stmt);
3408 def_stmt = SSA_NAME_DEF_STMT (t);
3410 /* Note we want to register the assert for the
3411 operand of the NOP_EXPR after SI, not after the
3413 if (live.live_on_block_p (t, bb))
3414 register_new_assert_for (t, t, comp_code, value,
3419 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3424 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3426 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3427 live.clear (op, bb);
3430 /* Traverse all PHI nodes in BB, updating live. */
3431 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3434 use_operand_p arg_p;
3436 gphi *phi = si.phi ();
3437 tree res = gimple_phi_result (phi);
3439 if (virtual_operand_p (res))
3442 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3444 tree arg = USE_FROM_PTR (arg_p);
3445 if (TREE_CODE (arg) == SSA_NAME)
3449 live.clear (res, bb);
3453 /* Do an RPO walk over the function computing SSA name liveness
3454 on-the-fly and deciding on assert expressions to insert. */
3457 vrp_asserts::find_assert_locations (void)
3459 int *rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3460 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3461 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (fun));
3464 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3465 for (i = 0; i < rpo_cnt; ++i)
3468 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3469 the order in which we compute liveness and insert asserts, we would
3470 otherwise fail to insert asserts into the loop latch. */
3471 for (auto loop : loops_list (cfun, 0))
3473 i = loop->latch->index;
3474 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3475 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3476 !gsi_end_p (gsi); gsi_next (&gsi))
3478 gphi *phi = gsi.phi ();
3479 if (virtual_operand_p (gimple_phi_result (phi)))
3481 tree arg = gimple_phi_arg_def (phi, j);
3482 if (TREE_CODE (arg) == SSA_NAME)
3483 live.set (arg, loop->latch);
3487 for (i = rpo_cnt - 1; i >= 0; --i)
3489 basic_block bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
3493 /* Process BB and update the live information with uses in
3495 find_assert_locations_in_bb (bb);
3497 /* Merge liveness into the predecessor blocks and free it. */
3498 if (!live.block_has_live_names_p (bb))
3501 FOR_EACH_EDGE (e, ei, bb->preds)
3503 int pred = e->src->index;
3504 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3507 live.merge (e->src, bb);
3509 if (bb_rpo[pred] < pred_rpo)
3510 pred_rpo = bb_rpo[pred];
3513 /* Record the RPO number of the last visited block that needs
3514 live information from this block. */
3515 last_rpo[rpo[i]] = pred_rpo;
3518 live.clear_block (bb);
3520 /* We can free all successors live bitmaps if all their
3521 predecessors have been visited already. */
3522 FOR_EACH_EDGE (e, ei, bb->succs)
3523 if (last_rpo[e->dest->index] == i)
3524 live.clear_block (e->dest);
3528 XDELETEVEC (bb_rpo);
3529 XDELETEVEC (last_rpo);
3532 /* Create an ASSERT_EXPR for NAME and insert it in the location
3533 indicated by LOC. Return true if we made any edge insertions. */
3536 vrp_asserts::process_assert_insertions_for (tree name, assert_locus *loc)
3538 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3541 gimple *assert_stmt;
3545 /* If we have X <=> X do not insert an assert expr for that. */
3546 if (loc->expr == loc->val)
3549 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3550 assert_stmt = build_assert_expr_for (cond, name);
3553 /* We have been asked to insert the assertion on an edge. This
3554 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3555 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3556 || (gimple_code (gsi_stmt (loc->si))
3559 gsi_insert_on_edge (loc->e, assert_stmt);
3563 /* If the stmt iterator points at the end then this is an insertion
3564 at the beginning of a block. */
3565 if (gsi_end_p (loc->si))
3567 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3568 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3572 /* Otherwise, we can insert right after LOC->SI, provided the
3573 statement is not required to be the last statement in the block. */
3574 stmt = gsi_stmt (loc->si);
3575 if (!stmt_ends_bb_p (stmt))
3577 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3581 /* If STMT must be the last statement in BB, we can only insert new
3582 assertions on the non-abnormal edge out of BB. Note that since
3583 STMT is not control flow, there may only be one non-abnormal/eh edge
3585 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3586 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3588 gsi_insert_on_edge (e, assert_stmt);
3595 /* Qsort helper for sorting assert locations. If stable is true, don't
3596 use iterative_hash_expr because it can be unstable for -fcompare-debug;
3597 on the other hand, some pointers might be NULL. */
3599 template <bool stable>
3601 vrp_asserts::compare_assert_loc (const void *pa, const void *pb)
3603 assert_locus * const a = *(assert_locus * const *)pa;
3604 assert_locus * const b = *(assert_locus * const *)pb;
3606 /* If stable, some asserts might be optimized away already, sort
3616 if (a->e == NULL && b->e != NULL)
3618 else if (a->e != NULL && b->e == NULL)
3621 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3622 no need to test both a->e and b->e. */
3624 /* Sort by destination index. */
3627 else if (a->e->dest->index > b->e->dest->index)
3629 else if (a->e->dest->index < b->e->dest->index)
3632 /* Sort by comp_code. */
3633 if (a->comp_code > b->comp_code)
3635 else if (a->comp_code < b->comp_code)
3640 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3641 uses DECL_UID of the VAR_DECL, so sorting might differ between
3642 -g and -g0. When doing the removal of redundant assert exprs
3643 and commonization to successors, this does not matter, but
3644 the final sort needs to be stable. */
3652 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3653 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3656 /* Break the tie using hashing and source/bb index. */
3658 return (a->e != NULL
3659 ? a->e->src->index - b->e->src->index
3660 : a->bb->index - b->bb->index);
3661 return ha > hb ? 1 : -1;
3664 /* Process all the insertions registered for every name N_i registered
3665 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3666 found in ASSERTS_FOR[i]. */
3669 vrp_asserts::process_assert_insertions ()
3673 bool update_edges_p = false;
3674 int num_asserts = 0;
3676 if (dump_file && (dump_flags & TDF_DETAILS))
3679 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3681 assert_locus *loc = asserts_for[i];
3684 auto_vec<assert_locus *, 16> asserts;
3685 for (; loc; loc = loc->next)
3686 asserts.safe_push (loc);
3687 asserts.qsort (compare_assert_loc<false>);
3689 /* Push down common asserts to successors and remove redundant ones. */
3691 assert_locus *common = NULL;
3692 unsigned commonj = 0;
3693 for (unsigned j = 0; j < asserts.length (); ++j)
3699 || loc->e->dest != common->e->dest
3700 || loc->comp_code != common->comp_code
3701 || ! operand_equal_p (loc->val, common->val, 0)
3702 || ! operand_equal_p (loc->expr, common->expr, 0))
3708 else if (loc->e == asserts[j-1]->e)
3710 /* Remove duplicate asserts. */
3711 if (commonj == j - 1)
3716 free (asserts[j-1]);
3717 asserts[j-1] = NULL;
3722 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
3724 /* We have the same assertion on all incoming edges of a BB.
3725 Insert it at the beginning of that block. */
3726 loc->bb = loc->e->dest;
3728 loc->si = gsi_none ();
3730 /* Clear asserts commoned. */
3731 for (; commonj != j; ++commonj)
3732 if (asserts[commonj])
3734 free (asserts[commonj]);
3735 asserts[commonj] = NULL;
3741 /* The asserts vector sorting above might be unstable for
3742 -fcompare-debug; sort again to ensure a stable sort. */
3743 asserts.qsort (compare_assert_loc<true>);
3744 for (unsigned j = 0; j < asserts.length (); ++j)
3749 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
3756 gsi_commit_edge_inserts ();
3758 statistics_counter_event (fun, "Number of ASSERT_EXPR expressions inserted",
3762 /* Traverse the flowgraph looking for conditional jumps to insert range
3763 expressions. These range expressions are meant to provide information
3764 to optimizations that need to reason in terms of value ranges. They
3765 will not be expanded into RTL. For instance, given:
3774 this pass will transform the code into:
3780 x = ASSERT_EXPR <x, x < y>
3785 y = ASSERT_EXPR <y, x >= y>
3789 The idea is that once copy and constant propagation have run, other
3790 optimizations will be able to determine what ranges of values can 'x'
3791 take in different paths of the code, simply by checking the reaching
3792 definition of 'x'. */
3795 vrp_asserts::insert_range_assertions (void)
3797 need_assert_for = BITMAP_ALLOC (NULL);
3798 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
3800 calculate_dominance_info (CDI_DOMINATORS);
3802 find_assert_locations ();
3803 if (!bitmap_empty_p (need_assert_for))
3805 process_assert_insertions ();
3806 update_ssa (TODO_update_ssa_no_phi);
3809 if (dump_file && (dump_flags & TDF_DETAILS))
3811 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
3812 dump_function_to_file (current_function_decl, dump_file, dump_flags);
3816 BITMAP_FREE (need_assert_for);
3819 /* Return true if all imm uses of VAR are either in STMT, or
3820 feed (optionally through a chain of single imm uses) GIMPLE_COND
3821 in basic block COND_BB. */
3824 vrp_asserts::all_imm_uses_in_stmt_or_feed_cond (tree var,
3826 basic_block cond_bb)
3828 use_operand_p use_p, use2_p;
3829 imm_use_iterator iter;
3831 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
3832 if (USE_STMT (use_p) != stmt)
3834 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
3835 if (is_gimple_debug (use_stmt))
3837 while (is_gimple_assign (use_stmt)
3838 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
3839 && single_imm_use (gimple_assign_lhs (use_stmt),
3840 &use2_p, &use_stmt2))
3841 use_stmt = use_stmt2;
3842 if (gimple_code (use_stmt) != GIMPLE_COND
3843 || gimple_bb (use_stmt) != cond_bb)
3849 /* Convert range assertion expressions into the implied copies and
3850 copy propagate away the copies. Doing the trivial copy propagation
3851 here avoids the need to run the full copy propagation pass after
3854 FIXME, this will eventually lead to copy propagation removing the
3855 names that had useful range information attached to them. For
3856 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
3857 then N_i will have the range [3, +INF].
3859 However, by converting the assertion into the implied copy
3860 operation N_i = N_j, we will then copy-propagate N_j into the uses
3861 of N_i and lose the range information.
3863 The problem with keeping ASSERT_EXPRs around is that passes after
3864 VRP need to handle them appropriately.
3866 Another approach would be to make the range information a first
3867 class property of the SSA_NAME so that it can be queried from
3868 any pass. This is made somewhat more complex by the need for
3869 multiple ranges to be associated with one SSA_NAME. */
3872 vrp_asserts::remove_range_assertions ()
3875 gimple_stmt_iterator si;
3876 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
3877 a basic block preceded by GIMPLE_COND branching to it and
3878 __builtin_trap, -1 if not yet checked, 0 otherwise. */
3881 /* Note that the BSI iterator bump happens at the bottom of the
3882 loop and no bump is necessary if we're removing the statement
3883 referenced by the current BSI. */
3884 FOR_EACH_BB_FN (bb, fun)
3885 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
3887 gimple *stmt = gsi_stmt (si);
3889 if (is_gimple_assign (stmt)
3890 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
3892 tree lhs = gimple_assign_lhs (stmt);
3893 tree rhs = gimple_assign_rhs1 (stmt);
3896 var = ASSERT_EXPR_VAR (rhs);
3898 if (TREE_CODE (var) == SSA_NAME
3899 && !POINTER_TYPE_P (TREE_TYPE (lhs))
3900 && SSA_NAME_RANGE_INFO (lhs))
3902 if (is_unreachable == -1)
3905 if (single_pred_p (bb)
3906 && assert_unreachable_fallthru_edge_p
3907 (single_pred_edge (bb)))
3911 if (x_7 >= 10 && x_7 < 20)
3912 __builtin_unreachable ();
3913 x_8 = ASSERT_EXPR <x_7, ...>;
3914 if the only uses of x_7 are in the ASSERT_EXPR and
3915 in the condition. In that case, we can copy the
3916 range info from x_8 computed in this pass also
3919 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
3922 if (SSA_NAME_RANGE_INFO (var))
3924 /* ?? This is a minor wart exposing the
3925 internals of SSA_NAME_RANGE_INFO in order
3926 to maintain existing behavior. This is
3927 because duplicate_ssa_name_range_info below
3928 needs a NULL destination range. This is
3929 all slated for removal... */
3930 ggc_free (SSA_NAME_RANGE_INFO (var));
3931 SSA_NAME_RANGE_INFO (var) = NULL;
3933 duplicate_ssa_name_range_info (var, lhs);
3934 maybe_set_nonzero_bits (single_pred_edge (bb), var);
3938 /* Propagate the RHS into every use of the LHS. For SSA names
3939 also propagate abnormals as it merely restores the original
3940 IL in this case (a replace_uses_by would assert).
3941 if (TREE_CODE (var) == SSA_NAME)
3943 imm_use_iterator iter;
3944 use_operand_p use_p;
3946 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3947 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3948 SET_USE (use_p, var);
3951 replace_uses_by (lhs, var);
3953 /* And finally, remove the copy, it is not needed. */
3954 gsi_remove (&si, true);
3955 release_defs (stmt);
3959 if (!is_gimple_debug (gsi_stmt (si)))
3966 class vrp_prop : public ssa_propagation_engine
3969 vrp_prop (vr_values *v)
3970 : ssa_propagation_engine (),
3973 void initialize (struct function *);
3977 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) final override;
3978 enum ssa_prop_result visit_phi (gphi *) final override;
3980 struct function *fun;
3981 vr_values *m_vr_values;
3984 /* Initialization required by ssa_propagate engine. */
3987 vrp_prop::initialize (struct function *fn)
3992 FOR_EACH_BB_FN (bb, fun)
3994 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3997 gphi *phi = si.phi ();
3998 if (!stmt_interesting_for_vrp (phi))
4000 tree lhs = PHI_RESULT (phi);
4001 m_vr_values->set_def_to_varying (lhs);
4002 prop_set_simulate_again (phi, false);
4005 prop_set_simulate_again (phi, true);
4008 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
4011 gimple *stmt = gsi_stmt (si);
4013 /* If the statement is a control insn, we must simulate it
4014 at least once; otherwise its outgoing edges will never
4015 get added. */
4016 if (stmt_ends_bb_p (stmt))
4017 prop_set_simulate_again (stmt, true);
4018 else if (!stmt_interesting_for_vrp (stmt))
4020 m_vr_values->set_defs_to_varying (stmt);
4021 prop_set_simulate_again (stmt, false);
4024 prop_set_simulate_again (stmt, true);
4029 /* Evaluate statement STMT. If the statement produces a useful range,
4030 return SSA_PROP_INTERESTING and record the SSA name with the
4031 interesting range into *OUTPUT_P.
4033 If STMT is a conditional branch and we can determine its truth
4034 value, the taken edge is recorded in *TAKEN_EDGE_P.
4036 If STMT produces a varying value, return SSA_PROP_VARYING. */
4038 enum ssa_prop_result
4039 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
4041 tree lhs = gimple_get_lhs (stmt);
4042 value_range_equiv vr;
4043 m_vr_values->extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
4047 if (m_vr_values->update_value_range (*output_p, &vr))
4049 if (dump_file && (dump_flags & TDF_DETAILS))
4051 fprintf (dump_file, "Found new range for ");
4052 print_generic_expr (dump_file, *output_p);
4053 fprintf (dump_file, ": ");
4054 dump_value_range (dump_file, &vr);
4055 fprintf (dump_file, "\n");
4058 if (vr.varying_p ())
4059 return SSA_PROP_VARYING;
4061 return SSA_PROP_INTERESTING;
4063 return SSA_PROP_NOT_INTERESTING;
4066 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4067 switch (gimple_call_internal_fn (stmt))
4069 case IFN_ADD_OVERFLOW:
4070 case IFN_SUB_OVERFLOW:
4071 case IFN_MUL_OVERFLOW:
4072 case IFN_ATOMIC_COMPARE_EXCHANGE:
4073 /* These internal calls return _Complex integer type,
4074 which VRP does not track, but the immediate uses
4075 thereof might be interesting. */
4076 if (lhs && TREE_CODE (lhs) == SSA_NAME)
4078 imm_use_iterator iter;
4079 use_operand_p use_p;
4080 enum ssa_prop_result res = SSA_PROP_VARYING;
4082 m_vr_values->set_def_to_varying (lhs);
4084 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
4086 gimple *use_stmt = USE_STMT (use_p);
4087 if (!is_gimple_assign (use_stmt))
4089 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
4090 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
4092 tree rhs1 = gimple_assign_rhs1 (use_stmt);
4093 tree use_lhs = gimple_assign_lhs (use_stmt);
4094 if (TREE_CODE (rhs1) != rhs_code
4095 || TREE_OPERAND (rhs1, 0) != lhs
4096 || TREE_CODE (use_lhs) != SSA_NAME
4097 || !stmt_interesting_for_vrp (use_stmt)
4098 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
4099 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
4100 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
4103 /* If there is a change in the value range for any of the
4104 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
4105 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
4106 or IMAGPART_EXPR immediate uses, but none of them have
4107 a change in their value ranges, return
4108 SSA_PROP_NOT_INTERESTING. If there are no
4109 {REAL,IMAG}PART_EXPR uses at all,
4110 return SSA_PROP_VARYING. */
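/* Illustrative GIMPLE (hypothetical names):
_5 = .ADD_OVERFLOW (x_2, y_3);
_6 = REALPART_EXPR <_5>;
_7 = IMAGPART_EXPR <_5>;
The _Complex result _5 itself is set to varying; what matters is
whether the ranges computed for _6 and _7 change. */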
4111 value_range_equiv new_vr;
4112 m_vr_values->extract_range_basic (&new_vr, use_stmt);
4113 const value_range_equiv *old_vr
4114 = m_vr_values->get_value_range (use_lhs);
4115 if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
4116 res = SSA_PROP_INTERESTING;
4118 res = SSA_PROP_NOT_INTERESTING;
4119 new_vr.equiv_clear ();
4120 if (res == SSA_PROP_INTERESTING)
4134 /* All other statements produce nothing of interest for VRP, so mark
4135 their outputs varying and prevent further simulation. */
4136 m_vr_values->set_defs_to_varying (stmt);
4138 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
4141 /* Visit all arguments for PHI node PHI that flow through executable
4142 edges. If a valid value range can be derived from all the incoming
4143 value ranges, set a new range for the LHS of PHI. */
4145 enum ssa_prop_result
4146 vrp_prop::visit_phi (gphi *phi)
4148 tree lhs = PHI_RESULT (phi);
4149 value_range_equiv vr_result;
4150 m_vr_values->extract_range_from_phi_node (phi, &vr_result);
4151 if (m_vr_values->update_value_range (lhs, &vr_result))
4153 if (dump_file && (dump_flags & TDF_DETAILS))
4155 fprintf (dump_file, "Found new range for ");
4156 print_generic_expr (dump_file, lhs);
4157 fprintf (dump_file, ": ");
4158 dump_value_range (dump_file, &vr_result);
4159 fprintf (dump_file, "\n");
4162 if (vr_result.varying_p ())
4163 return SSA_PROP_VARYING;
4165 return SSA_PROP_INTERESTING;
4168 /* Nothing changed, don't add outgoing edges. */
4169 return SSA_PROP_NOT_INTERESTING;
4172 /* Traverse all the blocks folding conditionals with known ranges. */
4175 vrp_prop::finalize ()
4179 /* We have completed propagating through the lattice. */
4180 m_vr_values->set_lattice_propagation_complete ();
4184 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
4185 m_vr_values->dump (dump_file);
4186 fprintf (dump_file, "\n");
4189 /* Record the computed information: non-null facts for pointer SSA_NAMEs and value ranges for non-pointer SSA_NAMEs. */
4190 for (i = 0; i < num_ssa_names; i++)
4192 tree name = ssa_name (i);
4196 const value_range_equiv *vr = m_vr_values->get_value_range (name);
4197 if (!name || vr->varying_p () || !vr->constant_p ())
4200 if (POINTER_TYPE_P (TREE_TYPE (name))
4201 && range_includes_zero_p (vr) == 0)
4202 set_ptr_nonnull (name);
4203 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
4204 set_range_info (name, *vr);
4208 class vrp_folder : public substitute_and_fold_engine
4211 vrp_folder (vr_values *v)
4212 : substitute_and_fold_engine (/* Fold all stmts. */ true),
4213 m_vr_values (v), simplifier (v)
4215 void simplify_casted_conds (function *fun);
4218 tree value_of_expr (tree name, gimple *stmt) override
4220 return m_vr_values->value_of_expr (name, stmt);
4222 bool fold_stmt (gimple_stmt_iterator *) final override;
4223 bool fold_predicate_in (gimple_stmt_iterator *);
4225 vr_values *m_vr_values;
4226 simplify_using_ranges simplifier;
4229 /* If the statement pointed to by SI has a predicate whose value can be
4230 computed using the value range information computed by VRP, compute
4231 its value and return true. Otherwise, return false. */
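/* For instance (hypothetical range): if i_5 is known to be in [0, 5],
   the predicate in

     if (i_5 > 10)

   is false for every value in that range, so the condition is rewritten
   with gimple_cond_make_false; an assignment such as

     t_6 = i_5 > 10;

   has its RHS replaced by the constant 0, converted to t_6's type.  */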
4234 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
4236 bool assignment_p = false;
4238 gimple *stmt = gsi_stmt (*si);
4240 if (is_gimple_assign (stmt)
4241 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
4243 assignment_p = true;
4244 val = simplifier.vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
4245 gimple_assign_rhs1 (stmt),
4246 gimple_assign_rhs2 (stmt),
4249 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4250 val = simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4251 gimple_cond_lhs (cond_stmt),
4252 gimple_cond_rhs (cond_stmt),
4260 val = fold_convert (TREE_TYPE (gimple_assign_lhs (stmt)), val);
4264 fprintf (dump_file, "Folding predicate ");
4265 print_gimple_expr (dump_file, stmt, 0);
4266 fprintf (dump_file, " to ");
4267 print_generic_expr (dump_file, val);
4268 fprintf (dump_file, "\n");
4271 if (is_gimple_assign (stmt))
4272 gimple_assign_set_rhs_from_tree (si, val);
4275 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
4276 gcond *cond_stmt = as_a <gcond *> (stmt);
4277 if (integer_zerop (val))
4278 gimple_cond_make_false (cond_stmt);
4279 else if (integer_onep (val))
4280 gimple_cond_make_true (cond_stmt);
4291 /* Callback for substitute_and_fold folding the stmt at *SI. */
4294 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
4296 if (fold_predicate_in (si))
4299 return simplifier.simplify (si);
4302 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
4303 was set by a type conversion can often be rewritten to use the RHS
4304 of the type conversion. Do this optimization for all conditionals
4305 in FUN.  */
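/* A hypothetical example of the rewrite: with

     _1 = (long) i_2;
     if (_1 != 0)

   the comparison can be performed directly on the conversion's operand
   when that does not change the result, giving

     if (i_2 != 0)

   and leaving _1 possibly dead.  */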
4308 vrp_folder::simplify_casted_conds (function *fun)
4311 FOR_EACH_BB_FN (bb, fun)
4313 gimple *last = last_stmt (bb);
4314 if (last && gimple_code (last) == GIMPLE_COND)
4316 if (simplifier.simplify_casted_cond (as_a <gcond *> (last)))
4318 if (dump_file && (dump_flags & TDF_DETAILS))
4320 fprintf (dump_file, "Folded into: ");
4321 print_gimple_stmt (dump_file, last, 0, TDF_SLIM);
4322 fprintf (dump_file, "\n");
4329 /* Main entry point to VRP (Value Range Propagation). This pass is
4330 loosely based on J. R. C. Patterson, ``Accurate Static Branch
4331 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
4332 Programming Language Design and Implementation, pp. 67-78, 1995.
4333 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
4335 This is essentially an SSA-CCP pass modified to deal with ranges
4336 instead of constants.
4338 While propagating ranges, we may find that two or more SSA names
4339 have equivalent, though distinct ranges. For instance,
4341 1 x_9 = p_3->a;
4342 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
4343 3 if (p_4 == q_2)
4344 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
4345 5 endif
4346 6 x_10 = q_2->a;
4347 7 x_11 = p_5->a;
4348 In the code above, pointer p_5 has range [q_2, q_2], but from the
4349 code we can also determine that p_5 cannot be NULL and, if q_2 had
4350 a non-varying range, p_5's range should also be compatible with it.
4352 These equivalences are created by two expressions: ASSERT_EXPR and
4353 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
4354 result of another assertion, we can use the fact that p_5 and
4355 p_4 are equivalent when evaluating p_5's range.
4357 Together with value ranges, we also propagate these equivalences
4358 between names so that we can take advantage of information from
4359 multiple ranges when doing final replacement. Note that this
4360 equivalency relation is transitive but not symmetric.
4362 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
4363 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
4364 in contexts where that assertion does not hold (e.g., in line 6).
4366 TODO, the main difference between this pass and Patterson's is that
4367 we do not propagate edge probabilities. We only compute whether
4368 edges can be taken or not. That is, instead of having a spectrum
4369 of jump probabilities between 0 and 1, we only deal with 0, 1 and
4370 DON'T KNOW. In the future, it may be worthwhile to propagate
4371 probabilities to aid branch prediction. */
4374 execute_vrp (struct function *fun, bool warn_array_bounds_p)
4376 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
4377 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4380 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
4381 Inserting assertions may split edges which will invalidate
4382 EDGE_DFS_BACK.  */
4383 vrp_asserts assert_engine (fun);
4384 assert_engine.insert_range_assertions ();
4386 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
4387 mark_dfs_back_edges ();
4389 vr_values vrp_vr_values;
4391 class vrp_prop vrp_prop (&vrp_vr_values);
4392 vrp_prop.initialize (fun);
4393 vrp_prop.ssa_propagate ();
4395 /* Instantiate the folder here, so that edge cleanups happen at the
4396 end of this function. */
4397 vrp_folder folder (&vrp_vr_values);
4398 vrp_prop.finalize ();
4400 /* If we're checking array refs, we want to merge information on
4401 the executability of each edge between vrp_folder and the
4402 check_array_bounds_dom_walker: each can clear the
4403 EDGE_EXECUTABLE flag on edges, in different ways.
4405 Hence, if we're going to run the array bounds checker, set
4406 the flag on every edge now, rather than in
4407 check_array_bounds_dom_walker's ctor; vrp_folder may clear
4408 it from some edges. */
4409 if (warn_array_bounds && warn_array_bounds_p)
4410 set_all_edges_as_executable (fun);
4412 folder.substitute_and_fold ();
4414 if (warn_array_bounds && warn_array_bounds_p)
4416 array_bounds_checker array_checker (fun, &vrp_vr_values);
4417 array_checker.check ();
4420 folder.simplify_casted_conds (fun);
4422 free_numbers_of_iterations_estimates (fun);
4424 assert_engine.remove_range_assertions ();
4427 loop_optimizer_finalize ();
4431 // This is a ranger based folder which continues to use the dominator
4432 // walk to access the substitute and fold machinery.  Ranges are calculated
4433 // on demand.
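// A hypothetical illustration of the callback flow: when the
// substitute-and-fold walk reaches
//
//   _5 = a_3 + 1;
//   b_6 = _5 * 2;
//
// it asks value_of_expr (_5, stmt); if ranger proves _5 is a singleton
// (say 7), uses of _5 are replaced by 7 and the defining statement may
// become dead and be removed.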
4435 class rvrp_folder : public substitute_and_fold_engine
4439 rvrp_folder (gimple_ranger *r) : substitute_and_fold_engine (),
4441 m_simplifier (r, r->non_executable_edge_flag)
4444 m_pta = new pointer_equiv_analyzer (m_ranger);
4445 m_last_bb_stmt = NULL;
4453 tree value_of_expr (tree name, gimple *s = NULL) override
4455 // Short-circuit subst_and_fold callbacks for abnormal ssa_names.
4456 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4458 tree ret = m_ranger->value_of_expr (name, s);
4459 if (!ret && supported_pointer_equiv_p (name))
4460 ret = m_pta->get_equiv (name);
4464 tree value_on_edge (edge e, tree name) override
4466 // Short-circuit subst_and_fold callbacks for abnormal ssa_names.
4467 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4469 tree ret = m_ranger->value_on_edge (e, name);
4470 if (!ret && supported_pointer_equiv_p (name))
4471 ret = m_pta->get_equiv (name);
4475 tree value_of_stmt (gimple *s, tree name = NULL) override
4477 // Short-circuit subst_and_fold callbacks for abnormal ssa_names.
4478 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4480 return m_ranger->value_of_stmt (s, name);
4483 void pre_fold_bb (basic_block bb) override
4486 for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
4488 m_ranger->register_inferred_ranges (gsi.phi ());
4489 m_last_bb_stmt = last_stmt (bb);
4492 void post_fold_bb (basic_block bb) override
4495 if (cfun->after_inlining)
4496 m_unreachable.maybe_register_block (bb);
4499 void pre_fold_stmt (gimple *stmt) override
4501 m_pta->visit_stmt (stmt);
4502 // If this is the last stmt and there are inferred ranges, reparse the
4503 // block for transitive inferred ranges that occur earlier in the block.
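// Inferred ranges are facts implied by a statement's side effects
// rather than by its operands; a hypothetical example:
//
//   *p_8 = x_2;   // from here on p_8 is known to be non-NULL
//
// Reparsing the block lets such facts feed back into ranges of names
// defined earlier in the same block.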
4504 if (stmt == m_last_bb_stmt)
4505 m_ranger->register_transitive_inferred_ranges (gimple_bb (stmt));
4508 bool fold_stmt (gimple_stmt_iterator *gsi) override
4510 bool ret = m_simplifier.simplify (gsi);
4512 ret = m_ranger->fold_stmt (gsi, follow_single_use_edges);
4513 m_ranger->register_inferred_ranges (gsi_stmt (*gsi));
4517 remove_unreachable m_unreachable;
4519 DISABLE_COPY_AND_ASSIGN (rvrp_folder);
4520 gimple_ranger *m_ranger;
4521 simplify_using_ranges m_simplifier;
4522 pointer_equiv_analyzer *m_pta;
4523 gimple *m_last_bb_stmt;
4526 /* Main entry point for a VRP pass using just ranger. This can be called
4527 from anywhere to perform a VRP pass, including from EVRP. */
4530 execute_ranger_vrp (struct function *fun, bool warn_array_bounds_p,
4533 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
4534 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4536 calculate_dominance_info (CDI_DOMINATORS);
4538 set_all_edges_as_executable (fun);
4539 gimple_ranger *ranger = enable_ranger (fun, false);
4540 rvrp_folder folder (ranger);
4541 folder.substitute_and_fold ();
4542 // Remove tagged builtin-unreachable and maybe update globals.
4543 folder.m_unreachable.remove_and_update_globals (final_p);
4544 if (dump_file && (dump_flags & TDF_DETAILS))
4545 ranger->dump (dump_file);
4547 if (warn_array_bounds && warn_array_bounds_p)
4549 // Set all edges as executable, except those ranger says aren't.
4550 int non_exec_flag = ranger->non_executable_edge_flag;
4552 FOR_ALL_BB_FN (bb, fun)
4556 FOR_EACH_EDGE (e, ei, bb->succs)
4557 if (e->flags & non_exec_flag)
4558 e->flags &= ~EDGE_EXECUTABLE;
4560 e->flags |= EDGE_EXECUTABLE;
4563 array_bounds_checker array_checker (fun, ranger);
4564 array_checker.check ();
4567 disable_ranger (fun);
4569 loop_optimizer_finalize ();
4575 const pass_data pass_data_vrp =
4577 GIMPLE_PASS, /* type */
4579 OPTGROUP_NONE, /* optinfo_flags */
4580 TV_TREE_VRP, /* tv_id */
4581 PROP_ssa, /* properties_required */
4582 0, /* properties_provided */
4583 0, /* properties_destroyed */
4584 0, /* todo_flags_start */
4585 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
4588 const pass_data pass_data_early_vrp =
4590 GIMPLE_PASS, /* type */
4592 OPTGROUP_NONE, /* optinfo_flags */
4593 TV_TREE_EARLY_VRP, /* tv_id */
4594 PROP_ssa, /* properties_required */
4595 0, /* properties_provided */
4596 0, /* properties_destroyed */
4597 0, /* todo_flags_start */
4598 ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
4601 static int vrp_pass_num = 0;
4602 class pass_vrp : public gimple_opt_pass
4605 pass_vrp (gcc::context *ctxt, const pass_data &data_)
4606 : gimple_opt_pass (data_, ctxt), data (data_), warn_array_bounds_p (false),
4607 my_pass (vrp_pass_num++)
4610 /* opt_pass methods: */
4611 opt_pass * clone () final override { return new pass_vrp (m_ctxt, data); }
4612 void set_pass_param (unsigned int n, bool param) final override
4614 gcc_assert (n == 0);
4615 warn_array_bounds_p = param;
4617 bool gate (function *) final override { return flag_tree_vrp != 0; }
4618 unsigned int execute (function *fun) final override
4622 return execute_ranger_vrp (fun, /*warn_array_bounds_p=*/false, false);
4624 if ((my_pass == 1 && param_vrp1_mode == VRP_MODE_RANGER)
4625 || (my_pass == 2 && param_vrp2_mode == VRP_MODE_RANGER))
4626 return execute_ranger_vrp (fun, warn_array_bounds_p, my_pass == 2);
4627 return execute_vrp (fun, warn_array_bounds_p);
4631 const pass_data &data;
4632 bool warn_array_bounds_p;
4634 }; // class pass_vrp
4636 const pass_data pass_data_assumptions =
4638 GIMPLE_PASS, /* type */
4639 "assumptions", /* name */
4640 OPTGROUP_NONE, /* optinfo_flags */
4641 TV_TREE_ASSUMPTIONS, /* tv_id */
4642 PROP_ssa, /* properties_required */
4643 PROP_assumptions_done, /* properties_provided */
4644 0, /* properties_destroyed */
4645 0, /* todo_flags_start */
4646 0, /* todo_flags_finish */
4649 class pass_assumptions : public gimple_opt_pass
4652 pass_assumptions (gcc::context *ctxt)
4653 : gimple_opt_pass (pass_data_assumptions, ctxt)
4656 /* opt_pass methods: */
4657 bool gate (function *fun) final override { return fun->assume_function; }
4658 unsigned int execute (function *) final override
4662 fprintf (dump_file, "Assumptions :\n--------------\n");
4664 for (tree arg = DECL_ARGUMENTS (cfun->decl); arg; arg = DECL_CHAIN (arg))
4666 tree name = ssa_default_def (cfun, arg);
4667 if (!name || !gimple_range_ssa_p (name))
4669 tree type = TREE_TYPE (name);
4670 if (!Value_Range::supports_type_p (type))
4672 Value_Range assume_range (type);
4673 if (query.assume_range_p (assume_range, name))
4675 // Set the global range of NAME to anything calculated.
4676 set_range_info (name, assume_range);
4679 print_generic_expr (dump_file, name, TDF_SLIM);
4680 fprintf (dump_file, " -> ");
4681 assume_range.dump (dump_file);
4682 fputc ('\n', dump_file);
4688 fputc ('\n', dump_file);
4689 gimple_dump_cfg (dump_file, dump_flags & ~TDF_DETAILS);
4690 if (dump_flags & TDF_DETAILS)
4691 query.dump (dump_file);
4693 return TODO_discard_function;
4696 }; // class pass_assumptions
4701 make_pass_vrp (gcc::context *ctxt)
4703 return new pass_vrp (ctxt, pass_data_vrp);
4707 make_pass_early_vrp (gcc::context *ctxt)
4709 return new pass_vrp (ctxt, pass_data_early_vrp);
4713 make_pass_assumptions (gcc::context *ctx)
4715 return new pass_assumptions (ctx);