1 /* Code for range operators.
2 Copyright (C) 2017-2022 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
26 #include "insn-codes.h"
31 #include "tree-pass.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
44 #include "gimple-walk.h"
47 #include "value-relation.h"
49 #include "tree-ssa-ccp.h"
// Convert an irange into the VALUE/MASK pair representation consumed by
// the CCP bit-value routines (declared in tree-ssa-ccp.h, included above):
// VALUE is taken from the range's lower bound and MASK from its
// nonzero-bits mask, both widened with the sign of the range's type.
// NOTE(review): the embedded source line numbers jump (51, 54, 59, 63) —
// intermediate lines (return type, braces, mask adjustment) are elided
// from this listing; do not assume the visible statements are the whole body.
51 // Convert irange bitmasks into a VALUE MASK pair suitable for calling CCP.
54 irange_to_masked_value (const irange &r, widest_int &value, widest_int &mask)
59 value = widest_int::from (r.lower_bound (), TYPE_SIGN (r.type ()));
63 mask = widest_int::from (r.get_nonzero_bits (), TYPE_SIGN (r.type ()));
// Refine R's nonzero-bits mask after applying binary operation CODE to
// operand ranges LH and RH.  Both operands are converted to the CCP
// VALUE/MASK form, folded through bit_value_binop, and the union of the
// resulting value and mask bits becomes R's nonzero-bits mask.
// NOTE(review): source line numbers jump here; some lines (early return,
// braces) are elided in this listing.
68 // Update the known bitmasks in R when applying the operation CODE to
72 update_known_bitmask (irange &r, tree_code code,
73 const irange &lh, const irange &rh)
// Nothing useful can be computed if any participating range is undefined.
75 if (r.undefined_p () || lh.undefined_p () || rh.undefined_p ())
78 widest_int value, mask, lh_mask, rh_mask, lh_value, rh_value;
79 tree type = r.type ();
80 signop sign = TYPE_SIGN (type);
81 int prec = TYPE_PRECISION (type);
// Operand types may differ from the result type, so capture each
// operand's sign and precision separately.
82 signop lh_sign = TYPE_SIGN (lh.type ());
83 signop rh_sign = TYPE_SIGN (rh.type ());
84 int lh_prec = TYPE_PRECISION (lh.type ());
85 int rh_prec = TYPE_PRECISION (rh.type ());
// Fold the operands' bit information through the CCP machinery.
87 irange_to_masked_value (lh, lh_value, lh_mask);
88 irange_to_masked_value (rh, rh_value, rh_mask);
89 bit_value_binop (code, sign, prec, &value, &mask,
90 lh_sign, lh_prec, lh_value, lh_mask,
91 rh_sign, rh_prec, rh_value, rh_mask);
// A bit set in either VALUE or MASK may be nonzero in the result.
92 r.set_nonzero_bits (value | mask);
// Type-limit helpers: thin wrappers around wi::max_value / wi::min_value
// using TYPE's precision and sign.
95 // Return the upper limit for a type.
97 static inline wide_int
98 max_limit (const_tree type)
100 return wi::max_value (TYPE_PRECISION (type) , TYPE_SIGN (type));
103 // Return the lower limit for a type.
105 static inline wide_int
106 min_limit (const_tree type)
108 return wi::min_value (TYPE_PRECISION (type) , TYPE_SIGN (type));
// Compute in R the subset of shift-count range OP that is well defined
// for shifting a value of TYPE, i.e. intersect OP with
// [0, TYPE_PRECISION (type) - 1].
// NOTE(review): source line numbers jump here; the early-return bodies
// and closing lines are elided in this listing.
111 // Return false if shifting by OP is undefined behavior. Otherwise, return
112 // true and the range it is to be shifted by. This allows trimming out of
113 // undefined ranges, leaving only valid ranges if there are any.
116 get_shift_range (irange &r, tree type, const irange &op)
118 if (op.undefined_p ())
121 // Build valid range and intersect it with the shift range.
122 r = value_range (build_int_cst_type (op.type (), 0),
123 build_int_cst_type (op.type (), TYPE_PRECISION (type) - 1));
126 // If there are no valid ranges in the shift range, return false.
127 if (r.undefined_p ())
// Small predicates over a [WMIN, WMAX] wide-int bound pair, comparing
// with the signedness of TYPE.
132 // Return TRUE if 0 is within [WMIN, WMAX].
135 wi_includes_zero_p (tree type, const wide_int &wmin, const wide_int &wmax)
137 signop sign = TYPE_SIGN (type);
138 return wi::le_p (wmin, 0, sign) && wi::ge_p (wmax, 0, sign);
141 // Return TRUE if [WMIN, WMAX] is the singleton 0.
144 wi_zero_p (tree type, const wide_int &wmin, const wide_int &wmax)
146 unsigned prec = TYPE_PRECISION (type);
147 return wmin == wmax && wi::eq_p (wmin, wi::zero (prec));
// Default implementations of the range_operator virtual interface.
// Operators that can do better override these; the defaults are maximally
// conservative (VARYING results, "don't know" relations, false for the
// inverse op1_range/op2_range queries).
// NOTE(review): source line numbers jump throughout this region; braces,
// returns and some statements are elided in this listing.
150 // Default wide_int fold operation returns [MIN, MAX].
153 range_operator::wi_fold (irange &r, tree type,
154 const wide_int &lh_lb ATTRIBUTE_UNUSED,
155 const wide_int &lh_ub ATTRIBUTE_UNUSED,
156 const wide_int &rh_lb ATTRIBUTE_UNUSED,
157 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
159 gcc_checking_assert (r.supports_type_p (type));
160 r.set_varying (type);
163 // Call wi_fold, except further split small subranges into constants.
164 // This can provide better precision. For something 8 >> [0,1]
165 // Instead of [8, 16], we will produce [8,8][16,16]
168 range_operator::wi_fold_in_parts (irange &r, tree type,
169 const wide_int &lh_lb,
170 const wide_int &lh_ub,
171 const wide_int &rh_lb,
172 const wide_int &rh_ub) const
// Compute the number of values in each operand range in the wider
// widest_int domain so the subtraction cannot wrap.
175 widest_int rh_range = wi::sub (widest_int::from (rh_ub, TYPE_SIGN (type)),
176 widest_int::from (rh_lb, TYPE_SIGN (type)));
177 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
178 widest_int::from (lh_lb, TYPE_SIGN (type)));
179 // If there are 2, 3, or 4 values in the RH range, do them separately.
180 // Call wi_fold_in_parts to check the RH side.
181 if (rh_range > 0 && rh_range < 4)
// Fold each singleton RH value separately and union the results
// (union statements elided in this listing).
183 wi_fold_in_parts (r, type, lh_lb, lh_ub, rh_lb, rh_lb);
186 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 1, rh_lb + 1);
190 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 2, rh_lb + 2);
194 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_ub, rh_ub);
197 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
198 // The RH side has been checked, so no recursion needed.
199 else if (lh_range > 0 && lh_range < 4)
201 wi_fold (r, type, lh_lb, lh_lb, rh_lb, rh_ub);
204 wi_fold (tmp, type, lh_lb + 1, lh_lb + 1, rh_lb, rh_ub);
208 wi_fold (tmp, type, lh_lb + 2, lh_lb + 2, rh_lb, rh_ub);
212 wi_fold (tmp, type, lh_ub, lh_ub, rh_lb, rh_ub);
215 // Otherwise just call wi_fold.
217 wi_fold (r, type, lh_lb, lh_ub, rh_lb, rh_ub)
220 // The default for fold is to break all ranges into sub-ranges and
221 // invoke the wi_fold method on each sub-range pair.
224 range_operator::fold_range (irange &r, tree type,
227 relation_trio trio) const
229 gcc_checking_assert (r.supports_type_p (type));
230 if (empty_range_varying (r, type, lh, rh))
233 relation_kind rel = trio.op1_op2 ();
234 unsigned num_lh = lh.num_pairs ();
235 unsigned num_rh = rh.num_pairs ();
237 // If both ranges are single pairs, fold directly into the result range.
238 // If the number of subranges grows too high, produce a summary result as the
239 // loop becomes exponential with little benefit. See PR 103821.
240 if ((num_lh == 1 && num_rh == 1) || num_lh * num_rh > 12)
242 wi_fold_in_parts (r, type, lh.lower_bound (), lh.upper_bound (),
243 rh.lower_bound (), rh.upper_bound ());
// Post-process every result the same way: apply any known op1/op2
// relation, then refine the known bitmask.
244 op1_op2_relation_effect (r, type, lh, rh, rel);
245 update_known_bitmask (r, m_code, lh, rh);
// Cross product of every subrange pair, unioned into the result
// (accumulation/early-exit lines elided in this listing).
251 for (unsigned x = 0; x < num_lh; ++x)
252 for (unsigned y = 0; y < num_rh; ++y)
254 wide_int lh_lb = lh.lower_bound (x);
255 wide_int lh_ub = lh.upper_bound (x);
256 wide_int rh_lb = rh.lower_bound (y);
257 wide_int rh_ub = rh.upper_bound (y);
258 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
262 op1_op2_relation_effect (r, type, lh, rh, rel);
263 update_known_bitmask (r, m_code, lh, rh);
267 op1_op2_relation_effect (r, type, lh, rh, rel);
268 update_known_bitmask (r, m_code, lh, rh);
272 // The default for op1_range is to return false.
275 range_operator::op1_range (irange &r ATTRIBUTE_UNUSED,
276 tree type ATTRIBUTE_UNUSED,
277 const irange &lhs ATTRIBUTE_UNUSED,
278 const irange &op2 ATTRIBUTE_UNUSED,
284 // The default for op2_range is to return false.
287 range_operator::op2_range (irange &r ATTRIBUTE_UNUSED,
288 tree type ATTRIBUTE_UNUSED,
289 const irange &lhs ATTRIBUTE_UNUSED,
290 const irange &op1 ATTRIBUTE_UNUSED,
296 // The default relation routines return VREL_VARYING.
299 range_operator::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
300 const irange &op1 ATTRIBUTE_UNUSED,
301 const irange &op2 ATTRIBUTE_UNUSED,
302 relation_kind rel ATTRIBUTE_UNUSED) const
308 range_operator::lhs_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
309 const irange &op1 ATTRIBUTE_UNUSED,
310 const irange &op2 ATTRIBUTE_UNUSED,
311 relation_kind rel ATTRIBUTE_UNUSED) const
317 range_operator::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED) const
322 // Default is no relation affects the LHS.
325 range_operator::op1_op2_relation_effect (irange &lhs_range ATTRIBUTE_UNUSED,
326 tree type ATTRIBUTE_UNUSED,
327 const irange &op1_range ATTRIBUTE_UNUSED,
328 const irange &op2_range ATTRIBUTE_UNUSED,
329 relation_kind rel ATTRIBUTE_UNUSED) const
// Helpers that build an irange from raw wide-int bounds, accounting for
// overflow/underflow during the bound computation.
// NOTE(review): source line numbers jump throughout; braces, returns and
// some statements (e.g. the computation of `tem` and `covers`) are elided
// in this listing.
334 // Create and return a range from a pair of wide-ints that are known
335 // to have overflowed (or underflowed).
338 value_range_from_overflowed_bounds (irange &r, tree type,
339 const wide_int &wmin,
340 const wide_int &wmax)
342 const signop sgn = TYPE_SIGN (type);
343 const unsigned int prec = TYPE_PRECISION (type);
// Truncate the overflowed bounds to the type's precision.
345 wide_int tmin = wide_int::from (wmin, prec, sgn);
346 wide_int tmax = wide_int::from (wmax, prec, sgn);
351 if (wi::cmp (tmin, tmax, sgn) < 0)
354 if (wi::cmp (tmax, tem, sgn) > 0)
357 // If the anti-range would cover nothing, drop to varying.
358 // Likewise if the anti-range bounds are outside of the types
360 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
361 r.set_varying (type);
// Otherwise the result is expressed as the anti-range ~[tmin, tmax].
364 tree tree_min = wide_int_to_tree (type, tmin);
365 tree tree_max = wide_int_to_tree (type, tmax);
366 r.set (tree_min, tree_max, VR_ANTI_RANGE);
370 // Create and return a range from a pair of wide-ints. MIN_OVF and
371 // MAX_OVF describe any overflow that might have occurred while
372 // calculating WMIN and WMAX respectively.
375 value_range_with_overflow (irange &r, tree type,
376 const wide_int &wmin, const wide_int &wmax,
377 wi::overflow_type min_ovf = wi::OVF_NONE,
378 wi::overflow_type max_ovf = wi::OVF_NONE)
380 const signop sgn = TYPE_SIGN (type);
381 const unsigned int prec = TYPE_PRECISION (type);
382 const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
384 // For one bit precision if max != min, then the range covers all
386 if (prec == 1 && wi::ne_p (wmax, wmin))
388 r.set_varying (type);
394 // If overflow wraps, truncate the values and adjust the range,
395 // kind, and bounds appropriately.
// Both-or-neither bound overflowed: truncation keeps a normal range.
396 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
398 wide_int tmin = wide_int::from (wmin, prec, sgn);
399 wide_int tmax = wide_int::from (wmax, prec, sgn);
400 // If the limits are swapped, we wrapped around and cover
402 if (wi::gt_p (tmin, tmax, sgn))
403 r.set_varying (type);
405 // No overflow or both overflow or underflow. The range
406 // kind stays normal.
407 r.set (wide_int_to_tree (type, tmin),
408 wide_int_to_tree (type, tmax));
// Exactly one bound wrapped: the truncated bounds describe an anti-range.
412 if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
413 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
414 value_range_from_overflowed_bounds (r, type, wmin, wmax);
416 // Other underflow and/or overflow, drop to VR_VARYING.
417 r.set_varying (type);
421 // If both bounds either underflowed or overflowed, then the result
423 if ((min_ovf == wi::OVF_OVERFLOW && max_ovf == wi::OVF_OVERFLOW)
424 || (min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_UNDERFLOW))
430 // If overflow does not wrap, saturate to [MIN, MAX].
431 wide_int new_lb, new_ub;
432 if (min_ovf == wi::OVF_UNDERFLOW)
433 new_lb = wi::min_value (prec, sgn);
434 else if (min_ovf == wi::OVF_OVERFLOW)
435 new_lb = wi::max_value (prec, sgn);
439 if (max_ovf == wi::OVF_UNDERFLOW)
440 new_ub = wi::min_value (prec, sgn);
441 else if (max_ovf == wi::OVF_OVERFLOW)
442 new_ub = wi::max_value (prec, sgn);
446 r.set (wide_int_to_tree (type, new_lb),
447 wide_int_to_tree (type, new_ub));
451 // Create and return a range from a pair of wide-ints. Canonicalize
452 // the case where the bounds are swapped. In which case, we transform
453 // [10,5] into [MIN,5][10,MAX].
456 create_possibly_reversed_range (irange &r, tree type,
457 const wide_int &new_lb, const wide_int &new_ub)
459 signop s = TYPE_SIGN (type);
460 // If the bounds are swapped, treat the result as if an overflow occurred.
461 if (wi::gt_p (new_lb, new_ub, s))
462 value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
464 // Otherwise it's just a normal range.
465 r.set (wide_int_to_tree (type, new_lb), wide_int_to_tree (type, new_ub));
// Classify a boolean LHS range for the op1_range/op2_range routines:
// undefined and varying cases are handled here (writing into R when
// appropriate); otherwise the caller dispatches on the TRUE/FALSE state.
// NOTE(review): the returns for each case are elided in this listing.
468 // Return the summary information about boolean range LHS. If EMPTY/FULL,
469 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
472 get_bool_state (vrange &r, const vrange &lhs, tree val_type)
474 // If there is no result, then this is unexecutable.
475 if (lhs.undefined_p ())
484 // For TRUE, we can't just test for [1,1] because Ada can have
485 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
// A LHS that still contains zero cannot be classified as TRUE.
486 if (lhs.contains_p (build_zero_cst (lhs.type ())))
488 r.set_varying (val_type);
// EQ_EXPR support: fold op1 == op2 into a boolean range, derive operand
// ranges back from a known boolean result, and report the implied
// op1/op2 relation.
// NOTE(review): source line numbers jump throughout; access specifiers,
// parameter lines, braces and returns are elided in this listing.
496 class operator_equal : public range_operator
498 using range_operator::fold_range;
499 using range_operator::op1_range;
500 using range_operator::op2_range;
502 virtual bool fold_range (irange &r, tree type,
505 relation_trio = TRIO_VARYING) const;
506 virtual bool op1_range (irange &r, tree type,
509 relation_trio = TRIO_VARYING) const;
510 virtual bool op2_range (irange &r, tree type,
513 relation_trio = TRIO_VARYING) const;
514 virtual relation_kind op1_op2_relation (const irange &lhs) const;
517 // Check if the LHS range indicates a relation between OP1 and OP2.
520 equal_op1_op2_relation (const irange &lhs)
522 if (lhs.undefined_p ())
523 return VREL_UNDEFINED;
525 // FALSE = op1 == op2 indicates NE_EXPR.
529 // TRUE = op1 == op2 indicates EQ_EXPR.
// A boolean range not containing zero can only mean TRUE.
530 if (!lhs.contains_p (build_zero_cst (lhs.type ())))
536 operator_equal::op1_op2_relation (const irange &lhs) const
538 return equal_op1_op2_relation (lhs);
543 operator_equal::fold_range (irange &r, tree type,
546 relation_trio rel) const
// A known relation between the operands may answer the question outright.
548 if (relop_early_resolve (r, type, op1, op2, rel, VREL_EQ))
551 // We can be sure the values are always equal or not if both ranges
552 // consist of a single value, and then compare them.
553 if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
554 && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
556 if (wi::eq_p (op1.lower_bound (), op2.upper_bound()))
557 r = range_true (type);
559 r = range_false (type);
563 // If ranges do not intersect, we know the range is not equal,
564 // otherwise we don't know anything for sure.
565 int_range_max tmp = op1;
567 if (tmp.undefined_p ())
568 r = range_false (type);
570 r = range_true_and_false (type);
576 operator_equal::op1_range (irange &r, tree type,
581 switch (get_bool_state (r, lhs, type))
584 // If it's true, the result is the same as OP2.
589 // If the result is false, the only time we know anything is
590 // if OP2 is a constant.
591 if (wi::eq_p (op2.lower_bound(), op2.upper_bound()))
597 r.set_varying (type);
// == is symmetric, so op2_range just swaps the operands.
607 operator_equal::op2_range (irange &r, tree type,
610 relation_trio rel) const
612 return operator_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
// NE_EXPR support: the mirror image of operator_equal.
// NOTE(review): source line numbers jump throughout; access specifiers,
// parameter lines, braces and returns are elided in this listing.
615 class operator_not_equal : public range_operator
617 using range_operator::fold_range;
618 using range_operator::op1_range;
619 using range_operator::op2_range;
621 virtual bool fold_range (irange &r, tree type,
624 relation_trio = TRIO_VARYING) const;
625 virtual bool op1_range (irange &r, tree type,
628 relation_trio = TRIO_VARYING) const;
629 virtual bool op2_range (irange &r, tree type,
632 relation_trio = TRIO_VARYING) const;
633 virtual relation_kind op1_op2_relation (const irange &lhs) const;
636 // Check if the LHS range indicates a relation between OP1 and OP2.
639 not_equal_op1_op2_relation (const irange &lhs)
641 if (lhs.undefined_p ())
642 return VREL_UNDEFINED;
644 // FALSE = op1 != op2 indicates EQ_EXPR.
648 // TRUE = op1 != op2 indicates NE_EXPR.
649 if (!lhs.contains_p (build_zero_cst (lhs.type ())))
655 operator_not_equal::op1_op2_relation (const irange &lhs) const
657 return not_equal_op1_op2_relation (lhs);
661 operator_not_equal::fold_range (irange &r, tree type,
664 relation_trio rel) const
666 if (relop_early_resolve (r, type, op1, op2, rel, VREL_NE))
669 // We can be sure the values are always equal or not if both ranges
670 // consist of a single value, and then compare them.
671 if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
672 && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
674 if (wi::ne_p (op1.lower_bound (), op2.upper_bound()))
675 r = range_true (type);
677 r = range_false (type);
681 // If ranges do not intersect, we know the range is not equal,
682 // otherwise we don't know anything for sure.
683 int_range_max tmp = op1;
// Note: empty intersection means != is definitely TRUE here, the
// inverse of the operator_equal case.
685 if (tmp.undefined_p ())
686 r = range_true (type);
688 r = range_true_and_false (type);
694 operator_not_equal::op1_range (irange &r, tree type,
699 switch (get_bool_state (r, lhs, type))
702 // If the result is true, the only time we know anything is if
703 // OP2 is a constant.
704 if (wi::eq_p (op2.lower_bound(), op2.upper_bound()))
710 r.set_varying (type);
714 // If it's false, the result is the same as OP2.
// != is symmetric, so op2_range just swaps the operands.
726 operator_not_equal::op2_range (irange &r, tree type,
729 relation_trio rel) const
731 return operator_not_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
// Helpers used by the relational op1_range/op2_range routines to build
// the range implied by a strict or non-strict comparison against VAL.
// NOTE(review): source line numbers jump; the `lim` declaration, the
// signed-1-bit condition and the underflow/overflow early returns are
// elided in this listing.
734 // (X < VAL) produces the range of [MIN, VAL - 1].
737 build_lt (irange &r, tree type, const wide_int &val)
739 wi::overflow_type ov;
741 signop sgn = TYPE_SIGN (type);
743 // Signed 1 bit cannot represent 1 for subtraction.
745 lim = wi::add (val, -1, sgn, &ov);
747 lim = wi::sub (val, 1, sgn, &ov);
749 // If val - 1 underflows, check if X < MIN, which is an empty range.
753 r = int_range<1> (type, min_limit (type), lim);
756 // (X <= VAL) produces the range of [MIN, VAL].
759 build_le (irange &r, tree type, const wide_int &val)
761 r = int_range<1> (type, min_limit (type), val);
764 // (X > VAL) produces the range of [VAL + 1, MAX].
767 build_gt (irange &r, tree type, const wide_int &val)
769 wi::overflow_type ov;
771 signop sgn = TYPE_SIGN (type);
773 // Signed 1 bit cannot represent 1 for addition.
775 lim = wi::sub (val, -1, sgn, &ov);
777 lim = wi::add (val, 1, sgn, &ov);
778 // If val + 1 overflows, the check is for X > MAX, which is an empty range.
782 r = int_range<1> (type, lim, max_limit (type));
785 // (X >= val) produces the range of [VAL, MAX].
788 build_ge (irange &r, tree type, const wide_int &val)
790 r = int_range<1> (type, val, max_limit (type));
// LT_EXPR support.
// NOTE(review): source line numbers jump throughout; access specifiers,
// parameter lines, braces and returns are elided in this listing.
794 class operator_lt : public range_operator
796 using range_operator::fold_range;
797 using range_operator::op1_range;
798 using range_operator::op2_range;
800 virtual bool fold_range (irange &r, tree type,
803 relation_trio = TRIO_VARYING) const;
804 virtual bool op1_range (irange &r, tree type,
807 relation_trio = TRIO_VARYING) const;
808 virtual bool op2_range (irange &r, tree type,
811 relation_trio = TRIO_VARYING) const;
812 virtual relation_kind op1_op2_relation (const irange &lhs) const;
815 // Check if the LHS range indicates a relation between OP1 and OP2.
818 lt_op1_op2_relation (const irange &lhs)
820 if (lhs.undefined_p ())
821 return VREL_UNDEFINED;
823 // FALSE = op1 < op2 indicates GE_EXPR.
827 // TRUE = op1 < op2 indicates LT_EXPR.
828 if (!lhs.contains_p (build_zero_cst (lhs.type ())))
834 operator_lt::op1_op2_relation (const irange &lhs) const
836 return lt_op1_op2_relation (lhs);
840 operator_lt::fold_range (irange &r, tree type,
843 relation_trio rel) const
845 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LT))
848 signop sign = TYPE_SIGN (op1.type ());
849 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
// Definitely true when all of op1 is below all of op2; definitely
// false when none of op1 can be below any of op2.
851 if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign))
852 r = range_true (type);
853 else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign))
854 r = range_false (type);
855 // Use nonzero bits to determine if < 0 is false.
856 else if (op2.zero_p () && !wi::neg_p (op1.get_nonzero_bits (), sign))
857 r = range_false (type);
859 r = range_true_and_false (type);
864 operator_lt::op1_range (irange &r, tree type,
869 switch (get_bool_state (r, lhs, type))
// TRUE: op1 < op2, so op1 <= ub(op2) - 1.  FALSE: op1 >= lb(op2).
872 build_lt (r, type, op2.upper_bound ());
876 build_ge (r, type, op2.lower_bound ());
886 operator_lt::op2_range (irange &r, tree type,
891 switch (get_bool_state (r, lhs, type))
// TRUE: op2 > lb(op1).  FALSE: op2 <= ub(op1).
894 build_gt (r, type, op1.lower_bound ());
898 build_le (r, type, op1.upper_bound ());
// LE_EXPR support.
// NOTE(review): source line numbers jump throughout; access specifiers,
// parameter lines, braces and returns are elided in this listing.
908 class operator_le : public range_operator
910 using range_operator::fold_range;
911 using range_operator::op1_range;
912 using range_operator::op2_range;
914 virtual bool fold_range (irange &r, tree type,
917 relation_trio = TRIO_VARYING) const;
918 virtual bool op1_range (irange &r, tree type,
921 relation_trio = TRIO_VARYING) const;
922 virtual bool op2_range (irange &r, tree type,
925 relation_trio = TRIO_VARYING) const;
926 virtual relation_kind op1_op2_relation (const irange &lhs) const;
929 // Check if the LHS range indicates a relation between OP1 and OP2.
932 le_op1_op2_relation (const irange &lhs)
934 if (lhs.undefined_p ())
935 return VREL_UNDEFINED;
937 // FALSE = op1 <= op2 indicates GT_EXPR.
941 // TRUE = op1 <= op2 indicates LE_EXPR.
942 if (!lhs.contains_p (build_zero_cst (lhs.type ())))
948 operator_le::op1_op2_relation (const irange &lhs) const
950 return le_op1_op2_relation (lhs);
954 operator_le::fold_range (irange &r, tree type,
957 relation_trio rel) const
959 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LE))
962 signop sign = TYPE_SIGN (op1.type ());
963 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
// Definitely true / definitely false bound tests; otherwise unknown.
965 if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign))
966 r = range_true (type);
967 else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign))
968 r = range_false (type);
970 r = range_true_and_false (type);
975 operator_le::op1_range (irange &r, tree type,
980 switch (get_bool_state (r, lhs, type))
// TRUE: op1 <= ub(op2).  FALSE: op1 > lb(op2).
983 build_le (r, type, op2.upper_bound ());
987 build_gt (r, type, op2.lower_bound ());
997 operator_le::op2_range (irange &r, tree type,
1000 relation_trio) const
1002 switch (get_bool_state (r, lhs, type))
// TRUE: op2 >= lb(op1).  FALSE: op2 < ub(op1).
1005 build_ge (r, type, op1.lower_bound ());
1009 build_lt (r, type, op1.upper_bound ());
// GT_EXPR support.
// NOTE(review): source line numbers jump throughout; access specifiers,
// parameter lines, braces and returns are elided in this listing.
1019 class operator_gt : public range_operator
1021 using range_operator::fold_range;
1022 using range_operator::op1_range;
1023 using range_operator::op2_range;
1025 virtual bool fold_range (irange &r, tree type,
1028 relation_trio = TRIO_VARYING) const;
1029 virtual bool op1_range (irange &r, tree type,
1032 relation_trio = TRIO_VARYING) const;
1033 virtual bool op2_range (irange &r, tree type,
1036 relation_trio = TRIO_VARYING) const;
1037 virtual relation_kind op1_op2_relation (const irange &lhs) const;
1040 // Check if the LHS range indicates a relation between OP1 and OP2.
1043 gt_op1_op2_relation (const irange &lhs)
1045 if (lhs.undefined_p ())
1046 return VREL_UNDEFINED;
1048 // FALSE = op1 > op2 indicates LE_EXPR.
1052 // TRUE = op1 > op2 indicates GT_EXPR.
1053 if (!lhs.contains_p (build_zero_cst (lhs.type ())))
1055 return VREL_VARYING;
1059 operator_gt::op1_op2_relation (const irange &lhs) const
1061 return gt_op1_op2_relation (lhs);
1066 operator_gt::fold_range (irange &r, tree type,
1067 const irange &op1, const irange &op2,
1068 relation_trio rel) const
1070 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GT))
1073 signop sign = TYPE_SIGN (op1.type ());
1074 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
// Definitely true / definitely false bound tests; otherwise unknown.
1076 if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign))
1077 r = range_true (type);
1078 else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign))
1079 r = range_false (type);
1081 r = range_true_and_false (type);
1086 operator_gt::op1_range (irange &r, tree type,
1087 const irange &lhs, const irange &op2,
1088 relation_trio) const
1090 switch (get_bool_state (r, lhs, type))
// TRUE: op1 > lb(op2).  FALSE: op1 <= ub(op2).
1093 build_gt (r, type, op2.lower_bound ());
1097 build_le (r, type, op2.upper_bound ());
1107 operator_gt::op2_range (irange &r, tree type,
1110 relation_trio) const
1112 switch (get_bool_state (r, lhs, type))
// TRUE: op2 < ub(op1).  FALSE: op2 >= lb(op1).
1115 build_lt (r, type, op1.upper_bound ());
1119 build_ge (r, type, op1.lower_bound ());
// GE_EXPR support.
// NOTE(review): source line numbers jump throughout; access specifiers,
// parameter lines, braces and returns are elided in this listing.
1129 class operator_ge : public range_operator
1131 using range_operator::fold_range;
1132 using range_operator::op1_range;
1133 using range_operator::op2_range;
1135 virtual bool fold_range (irange &r, tree type,
1138 relation_trio = TRIO_VARYING) const;
1139 virtual bool op1_range (irange &r, tree type,
1142 relation_trio = TRIO_VARYING) const;
1143 virtual bool op2_range (irange &r, tree type,
1146 relation_trio = TRIO_VARYING) const;
1147 virtual relation_kind op1_op2_relation (const irange &lhs) const;
1150 // Check if the LHS range indicates a relation between OP1 and OP2.
1153 ge_op1_op2_relation (const irange &lhs)
1155 if (lhs.undefined_p ())
1156 return VREL_UNDEFINED;
1158 // FALSE = op1 >= op2 indicates LT_EXPR.
1162 // TRUE = op1 >= op2 indicates GE_EXPR.
1163 if (!lhs.contains_p (build_zero_cst (lhs.type ())))
1165 return VREL_VARYING;
1169 operator_ge::op1_op2_relation (const irange &lhs) const
1171 return ge_op1_op2_relation (lhs);
1175 operator_ge::fold_range (irange &r, tree type,
1178 relation_trio rel) const
1180 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GE))
1183 signop sign = TYPE_SIGN (op1.type ());
1184 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
// Definitely true / definitely false bound tests; otherwise unknown.
1186 if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign))
1187 r = range_true (type);
1188 else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign))
1189 r = range_false (type);
1191 r = range_true_and_false (type);
1196 operator_ge::op1_range (irange &r, tree type,
1199 relation_trio) const
1201 switch (get_bool_state (r, lhs, type))
// TRUE: op1 >= lb(op2).  FALSE: op1 < ub(op2).
1204 build_ge (r, type, op2.lower_bound ());
1208 build_lt (r, type, op2.upper_bound ());
1218 operator_ge::op2_range (irange &r, tree type,
1221 relation_trio) const
1223 switch (get_bool_state (r, lhs, type))
// TRUE: op2 <= ub(op1).  FALSE: op2 > lb(op1).
1226 build_le (r, type, op1.upper_bound ());
1230 build_gt (r, type, op1.lower_bound ());
// PLUS_EXPR support: forward folding via wi_fold, inverse op1/op2 range
// computation via MINUS_EXPR, and LHS/operand relation derivation that is
// aware of wrapping overflow.
// NOTE(review): source line numbers jump throughout this region; access
// specifiers, parameter lines, braces, returns and several statements are
// elided in this listing.
1240 class operator_plus : public range_operator
1242 using range_operator::op1_range;
1243 using range_operator::op2_range;
1244 using range_operator::lhs_op1_relation;
1245 using range_operator::lhs_op2_relation;
1247 virtual bool op1_range (irange &r, tree type,
1250 relation_trio) const;
1251 virtual bool op2_range (irange &r, tree type,
1254 relation_trio) const;
1255 virtual void wi_fold (irange &r, tree type,
1256 const wide_int &lh_lb,
1257 const wide_int &lh_ub,
1258 const wide_int &rh_lb,
1259 const wide_int &rh_ub) const;
1260 virtual relation_kind lhs_op1_relation (const irange &lhs, const irange &op1,
1262 relation_kind rel) const;
1263 virtual relation_kind lhs_op2_relation (const irange &lhs, const irange &op1,
1265 relation_kind rel) const;
1268 // Check to see if the range of OP2 indicates anything about the relation
1269 // between LHS and OP1.
1272 operator_plus::lhs_op1_relation (const irange &lhs,
1275 relation_kind) const
1277 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
1278 return VREL_VARYING;
1280 tree type = lhs.type ();
1281 unsigned prec = TYPE_PRECISION (type);
1282 wi::overflow_type ovf1, ovf2;
1283 signop sign = TYPE_SIGN (type);
1285 // LHS = OP1 + 0 indicates LHS == OP1.
// Probe whether the addition can wrap at either end of the ranges.
1289 if (TYPE_OVERFLOW_WRAPS (type))
1291 wi::add (op1.lower_bound (), op2.lower_bound (), sign, &ovf1);
1292 wi::add (op1.upper_bound (), op2.upper_bound (), sign, &ovf2);
1295 ovf1 = ovf2 = wi::OVF_NONE;
1297 // Never wrapping additions.
1300 // Positive op2 means lhs > op1.
1301 if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
1303 if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
1306 // Negative op2 means lhs < op1.
1307 if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
1309 if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
1312 // Always wrapping additions.
// When both ends wrap the same way, the comparisons invert.
1313 else if (ovf1 && ovf1 == ovf2)
1315 // Positive op2 means lhs < op1.
1316 if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
1318 if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
1321 // Negative op2 means lhs > op1.
1322 if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
1324 if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
1328 // If op2 does not contain 0, then LHS and OP1 can never be equal.
1329 if (!range_includes_zero_p (&op2))
1332 return VREL_VARYING;
1335 // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
1339 operator_plus::lhs_op2_relation (const irange &lhs, const irange &op1,
1340 const irange &op2, relation_kind rel) const
1342 return lhs_op1_relation (lhs, op2, op1, rel);
// Forward fold: [lh_lb + rh_lb, lh_ub + rh_ub] with overflow handling.
1346 operator_plus::wi_fold (irange &r, tree type,
1347 const wide_int &lh_lb, const wide_int &lh_ub,
1348 const wide_int &rh_lb, const wide_int &rh_ub) const
1350 wi::overflow_type ov_lb, ov_ub;
1351 signop s = TYPE_SIGN (type);
1352 wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb);
1353 wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub);
1354 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
1357 // Given addition or subtraction, determine the possible NORMAL ranges and
1358 // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition.
1359 // Return the relation that exists between the LHS and OP1 in order for the
1360 // NORMAL range to apply.
1361 // a return value of VREL_VARYING means no ranges were applicable.
1363 static relation_kind
1364 plus_minus_ranges (irange &r_ov, irange &r_normal, const irange &offset,
1367 relation_kind kind = VREL_VARYING;
1368 // For now, only deal with constant adds. This could be extended to ranges
1369 // when someone is so motivated.
1370 if (!offset.singleton_p () || offset.zero_p ())
1373 // Always work with a positive offset. ie a+ -2 -> a-2 and a- -2 > a+2
1374 wide_int off = offset.lower_bound ();
1375 if (wi::neg_p (off, SIGNED))
1378 off = wi::neg (off);
1381 wi::overflow_type ov;
1382 tree type = offset.type ();
1383 unsigned prec = TYPE_PRECISION (type);
1386 // calculate the normal range and relation for the operation.
1390 lb = wi::zero (prec);
1391 ub = wi::sub (wi::to_wide (vrp_val_max (type)), off, UNSIGNED, &ov);
1398 ub = wi::to_wide (vrp_val_max (type));
// The overflow range is the complement of the normal range.
1401 int_range<2> normal_range (type, lb, ub);
1402 int_range<2> ov_range (type, lb, ub, VR_ANTI_RANGE);
1405 r_normal = normal_range;
1409 // Once op1 has been calculated by operator_plus or operator_minus, check
1410 // to see if the relation passed causes any part of the calculation to
1411 // be not possible. ie
1412 // a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
1413 // and that further refines a_2 to [0, 0].
1414 // R is the value of op1, OP2 is the offset being added/subtracted, REL is the
1415 // relation between LHS relation OP1 and ADD_P is true for PLUS, false for
1416 // MINUS. IF any adjustment can be made, R will reflect it.
1419 adjust_op1_for_overflow (irange &r, const irange &op2, relation_kind rel,
1422 if (r.undefined_p ())
1424 tree type = r.type ();
1425 // Check for unsigned overflow and calculate the overflow part.
1426 signop s = TYPE_SIGN (type);
1427 if (!TYPE_OVERFLOW_WRAPS (type) || s == SIGNED)
1430 // Only work with <, <=, >, >= relations.
1431 if (!relation_lt_le_gt_ge_p (rel))
1434 // Get the ranges for this offset.
1435 int_range_max normal, overflow;
1436 relation_kind k = plus_minus_ranges (overflow, normal, op2, add_p);
1438 // VREL_VARYING means there are no adjustments.
1439 if (k == VREL_VARYING)
1442 // If the relations match use the normal range, otherwise use overflow range.
1443 if (relation_intersect (k, rel) == k)
1444 r.intersect (normal);
1446 r.intersect (overflow);
// Inverse operation: op1 = lhs - op2, then refine using any known
// lhs/op2 relation.
1451 operator_plus::op1_range (irange &r, tree type,
1454 relation_trio trio) const
1456 if (lhs.undefined_p ())
1458 // Start with the default operation.
1459 range_op_handler minus (MINUS_EXPR, type);
1462 bool res = minus.fold_range (r, type, lhs, op2);
1463 relation_kind rel = trio.lhs_op2 ();
1464 // Check for a relation refinement.
1466 adjust_op1_for_overflow (r, op2, rel, true /* PLUS_EXPR */);
// + is symmetric, so op2_range just swaps the operands.
1471 operator_plus::op2_range (irange &r, tree type,
1474 relation_trio rel) const
1476 return op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
// MINUS_EXPR support: forward fold subtracts the opposite bounds
// ([lh_lb - rh_ub, lh_ub - rh_lb]); relation derivation is shown only for
// the unsigned case here.
// NOTE(review): source line numbers jump throughout; access specifiers,
// parameter lines, braces, returns and the body of lhs_op1_relation's
// unsigned case are elided in this listing.
1480 class operator_minus : public range_operator
1482 using range_operator::fold_range;
1483 using range_operator::op1_range;
1484 using range_operator::op2_range;
1486 virtual bool op1_range (irange &r, tree type,
1489 relation_trio) const;
1490 virtual bool op2_range (irange &r, tree type,
1493 relation_trio) const;
1494 virtual void wi_fold (irange &r, tree type,
1495 const wide_int &lh_lb,
1496 const wide_int &lh_ub,
1497 const wide_int &rh_lb,
1498 const wide_int &rh_ub) const;
1499 virtual relation_kind lhs_op1_relation (const irange &lhs,
1502 relation_kind rel) const;
1503 virtual bool op1_op2_relation_effect (irange &lhs_range,
1505 const irange &op1_range,
1506 const irange &op2_range,
1507 relation_kind rel) const;
1511 operator_minus::wi_fold (irange &r, tree type,
1512 const wide_int &lh_lb, const wide_int &lh_ub,
1513 const wide_int &rh_lb, const wide_int &rh_ub) const
1515 wi::overflow_type ov_lb, ov_ub;
1516 signop s = TYPE_SIGN (type);
// Minimum is lh_lb - rh_ub, maximum is lh_ub - rh_lb.
1517 wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
1518 wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
1519 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
1523 // Return the relation between LHS and OP1 based on the relation between
1527 operator_minus::lhs_op1_relation (const irange &, const irange &op1,
1528 const irange &, relation_kind rel) const
1530 if (!op1.undefined_p () && TYPE_SIGN (op1.type ()) == UNSIGNED)
1539 return VREL_VARYING;
1542 // Check to see if the relation REL between OP1 and OP2 has any effect on the
1543 // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper
1544 // function for both MINUS_EXPR and POINTER_DIFF_EXPR.
1547 minus_op1_op2_relation_effect (irange &lhs_range, tree type,
1548 const irange &op1_range ATTRIBUTE_UNUSED,
1549 const irange &op2_range ATTRIBUTE_UNUSED,
1552 if (rel == VREL_VARYING)
1555 int_range<2> rel_range;
1556 unsigned prec = TYPE_PRECISION (type);
1557 signop sgn = TYPE_SIGN (type);
1559 // == and != produce [0,0] and ~[0,0] regardless of wrapping.
1561 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec));
1562 else if (rel == VREL_NE)
1563 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
1565 else if (TYPE_OVERFLOW_WRAPS (type))
1569 // For wrapping signed values and unsigned, if op1 > op2 or
1570 // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
1573 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
1584 // op1 > op2, op1 - op2 can be restricted to [1, +INF]
1586 rel_range = int_range<2> (type, wi::one (prec),
1587 wi::max_value (prec, sgn));
1589 // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
1591 rel_range = int_range<2> (type, wi::zero (prec),
1592 wi::max_value (prec, sgn));
1594 // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
1596 rel_range = int_range<2> (type, wi::min_value (prec, sgn),
1597 wi::minus_one (prec));
1599 // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
1601 rel_range = int_range<2> (type, wi::min_value (prec, sgn),
1608 lhs_range.intersect (rel_range);
1613 operator_minus::op1_op2_relation_effect (irange &lhs_range, tree type,
1614 const irange &op1_range,
1615 const irange &op2_range,
1616 relation_kind rel) const
1618 return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
1623 operator_minus::op1_range (irange &r, tree type,
1626 relation_trio trio) const
1628 if (lhs.undefined_p ())
1630 // Start with the default operation.
1631 range_op_handler minus (PLUS_EXPR, type);
1634 bool res = minus.fold_range (r, type, lhs, op2);
1635 relation_kind rel = trio.lhs_op2 ();
1637 adjust_op1_for_overflow (r, op2, rel, false /* PLUS_EXPR */);
1643 operator_minus::op2_range (irange &r, tree type,
1646 relation_trio) const
1648 if (lhs.undefined_p ())
1650 return fold_range (r, type, op1, lhs);
// Range operator for POINTER_DIFF_EXPR; only the relation effect is
// specialized, and it reuses the MINUS_EXPR helper.
1654 class operator_pointer_diff : public range_operator
1656 virtual bool op1_op2_relation_effect (irange &lhs_range,
1658 const irange &op1_range,
1659 const irange &op2_range,
1660 relation_kind rel) const;
// Pointer difference behaves like subtraction for relation purposes.
1664 operator_pointer_diff::op1_op2_relation_effect (irange &lhs_range, tree type,
1665 const irange &op1_range,
1666 const irange &op2_range,
1667 relation_kind rel) const
1669 return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
// Range operator for MIN_EXPR.
1674 class operator_min : public range_operator
1677 virtual void wi_fold (irange &r, tree type,
1678 const wide_int &lh_lb,
1679 const wide_int &lh_ub,
1680 const wide_int &rh_lb,
1681 const wide_int &rh_ub) const;
// MIN is monotonic in both operands, so the result range is simply the
// elementwise minimum of the bounds.
1685 operator_min::wi_fold (irange &r, tree type,
1686 const wide_int &lh_lb, const wide_int &lh_ub,
1687 const wide_int &rh_lb, const wide_int &rh_ub) const
1689 signop s = TYPE_SIGN (type);
1690 wide_int new_lb = wi::min (lh_lb, rh_lb, s);
1691 wide_int new_ub = wi::min (lh_ub, rh_ub, s);
1692 value_range_with_overflow (r, type, new_lb, new_ub);
// Range operator for MAX_EXPR.
1696 class operator_max : public range_operator
1699 virtual void wi_fold (irange &r, tree type,
1700 const wide_int &lh_lb,
1701 const wide_int &lh_ub,
1702 const wide_int &rh_lb,
1703 const wide_int &rh_ub) const;
// MAX is monotonic in both operands, so the result range is simply the
// elementwise maximum of the bounds.
1707 operator_max::wi_fold (irange &r, tree type,
1708 const wide_int &lh_lb, const wide_int &lh_ub,
1709 const wide_int &rh_lb, const wide_int &rh_ub) const
1711 signop s = TYPE_SIGN (type);
1712 wide_int new_lb = wi::max (lh_lb, rh_lb, s);
1713 wide_int new_ub = wi::max (lh_ub, rh_ub, s);
1714 value_range_with_overflow (r, type, new_lb, new_ub);
// Base class for operators (multiply, divide, shifts) whose result
// range is derived from applying the operation to the four bound-pair
// combinations of the operand ranges.
1718 class cross_product_operator : public range_operator
1721 // Perform an operation between two wide-ints and place the result
1722 // in R. Return true if the operation overflowed.
1723 virtual bool wi_op_overflows (wide_int &r,
1726 const wide_int &) const = 0;
1728 // Calculate the cross product of two sets of sub-ranges and return it.
1729 void wi_cross_product (irange &r, tree type,
1730 const wide_int &lh_lb,
1731 const wide_int &lh_ub,
1732 const wide_int &rh_lb,
1733 const wide_int &rh_ub) const;
1736 // Calculate the cross product of two sets of ranges and return it.
1738 // Multiplications, divisions and shifts are a bit tricky to handle,
1739 // depending on the mix of signs we have in the two ranges, we need to
1740 // operate on different values to get the minimum and maximum values
1741 // for the new range. One approach is to figure out all the
1742 // variations of range combinations and do the operations.
1744 // However, this involves several calls to compare_values and it is
1745 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
1746 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1) and then
1747 // figure the smallest and largest values to form the new range.
// Compute the four corner products CP1..CP4 via wi_op_overflows and
// derive the result range from their extremes; defaults to varying on
// overflow paths the caller cannot represent.
1750 cross_product_operator::wi_cross_product (irange &r, tree type,
1751 const wide_int &lh_lb,
1752 const wide_int &lh_ub,
1753 const wide_int &rh_lb,
1754 const wide_int &rh_ub) const
1756 wide_int cp1, cp2, cp3, cp4;
1757 // Default to varying.
1758 r.set_varying (type);
1760 // Compute the 4 cross operations, bailing if we get an overflow we
// Singleton operands let us skip recomputing an identical corner.
1762 if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
1764 if (wi::eq_p (lh_lb, lh_ub))
1766 else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
1768 if (wi::eq_p (rh_lb, rh_ub))
1770 else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
1772 if (wi::eq_p (lh_lb, lh_ub))
1774 else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
// Order each pair so that the min/max selection below is a simple
// comparison of the already-sorted halves.
1778 signop sign = TYPE_SIGN (type);
1779 if (wi::gt_p (cp1, cp2, sign))
1780 std::swap (cp1, cp2);
1781 if (wi::gt_p (cp3, cp4, sign))
1782 std::swap (cp3, cp4);
1784 // Choose min and max from the ordered pairs.
1785 wide_int res_lb = wi::min (cp1, cp3, sign);
1786 wide_int res_ub = wi::max (cp2, cp4, sign);
1787 value_range_with_overflow (r, type, res_lb, res_ub);
// Range operator for MULT_EXPR, built on the cross-product machinery.
1791 class operator_mult : public cross_product_operator
1793 using range_operator::op1_range;
1794 using range_operator::op2_range;
1796 virtual void wi_fold (irange &r, tree type,
1797 const wide_int &lh_lb,
1798 const wide_int &lh_ub,
1799 const wide_int &rh_lb,
1800 const wide_int &rh_ub) const final override;
1801 virtual bool wi_op_overflows (wide_int &res, tree type,
1802 const wide_int &w0, const wide_int &w1)
1803 const final override;
1804 virtual bool op1_range (irange &r, tree type,
1807 relation_trio) const final override;
1808 virtual bool op2_range (irange &r, tree type,
1811 relation_trio) const final override;
// Solve for op1 in LHS = op1 * op2 by dividing, when op2 is a known
// non-zero singleton.
1815 operator_mult::op1_range (irange &r, tree type,
1816 const irange &lhs, const irange &op2,
1817 relation_trio) const
1820 if (lhs.undefined_p ())
1823 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
1824 // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
1825 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
1826 if (TYPE_OVERFLOW_WRAPS (type))
1829 if (op2.singleton_p (&offset) && !integer_zerop (offset))
1830 return range_op_handler (TRUNC_DIV_EXPR, type).fold_range (r, type,
// Multiplication is commutative: op2_range is op1_range with the
// operands swapped.
1836 operator_mult::op2_range (irange &r, tree type,
1837 const irange &lhs, const irange &op1,
1838 relation_trio rel) const
1840 return operator_mult::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
// Multiply W0 by W1, reporting overflow; on undefined overflow, clamp
// to the appropriate type extreme instead.
1844 operator_mult::wi_op_overflows (wide_int &res, tree type,
1845 const wide_int &w0, const wide_int &w1) const
1847 wi::overflow_type overflow = wi::OVF_NONE;
1848 signop sign = TYPE_SIGN (type);
1849 res = wi::mul (w0, w1, sign, &overflow);
1850 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
1852 // For multiplication, the sign of the overflow is given
1853 // by the comparison of the signs of the operands.
1854 if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
1855 res = wi::max_value (w0.get_precision (), sign);
1857 res = wi::min_value (w0.get_precision (), sign);
// Fold the multiplication.  With undefined overflow the plain cross
// product suffices; with wrapping overflow the products are computed
// in double precision (widest2_int) to detect wrap explicitly.
1864 operator_mult::wi_fold (irange &r, tree type,
1865 const wide_int &lh_lb, const wide_int &lh_ub,
1866 const wide_int &rh_lb, const wide_int &rh_ub) const
1868 if (TYPE_OVERFLOW_UNDEFINED (type))
1870 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
1874 // Multiply the ranges when overflow wraps. This is basically fancy
1875 // code so we don't drop to varying with an unsigned
1878 // This test requires 2*prec bits if both operands are signed and
1879 // 2*prec + 2 bits if either is not. Therefore, extend the values
1880 // using the sign of the result to PREC2. From here on out,
1881 // everything is just signed math no matter what the input types
1884 signop sign = TYPE_SIGN (type);
1885 unsigned prec = TYPE_PRECISION (type);
1886 widest2_int min0 = widest2_int::from (lh_lb, sign);
1887 widest2_int max0 = widest2_int::from (lh_ub, sign);
1888 widest2_int min1 = widest2_int::from (rh_lb, sign);
1889 widest2_int max1 = widest2_int::from (rh_ub, sign);
1890 widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
1891 widest2_int size = sizem1 + 1;
1893 // Canonicalize the intervals.
1894 if (sign == UNSIGNED)
1896 if (wi::ltu_p (size, min0 + max0))
1901 if (wi::ltu_p (size, min1 + max1))
1908 // Sort the 4 products so that min is in prod0 and max is in
1910 widest2_int prod0 = min0 * min1;
1911 widest2_int prod1 = min0 * max1;
1912 widest2_int prod2 = max0 * min1;
1913 widest2_int prod3 = max0 * max1;
1915 // min0min1 > max0max1
1917 std::swap (prod0, prod3);
1919 // min0max1 > max0min1
1921 std::swap (prod1, prod2);
1924 std::swap (prod0, prod1);
1927 std::swap (prod2, prod3);
// If the spread of the products covers the whole type, the result
// wraps and we cannot keep a simple interval.
1930 prod2 = prod3 - prod0;
1931 if (wi::geu_p (prod2, sizem1))
1933 // Multiplying by X, where X is a power of 2 is [0,0][X,+INF].
1934 if (TYPE_UNSIGNED (type) && rh_lb == rh_ub
1935 && wi::exact_log2 (rh_lb) != -1 && prec > 1)
1937 r.set (type, rh_lb, wi::max_value (prec, sign));
1939 zero.set_zero (type);
1943 // The range covers all values.
1944 r.set_varying (type);
// Truncate the double-precision bounds back to the type's precision.
1948 wide_int new_lb = wide_int::from (prod0, prec, sign);
1949 wide_int new_ub = wide_int::from (prod3, prec, sign);
1950 create_possibly_reversed_range (r, type, new_lb, new_ub);
// Range operator for the division tree codes (TRUNC/FLOOR/ROUND/CEIL/
// EXACT_DIV_EXPR), built on the cross-product machinery.
1955 class operator_div : public cross_product_operator
1958 virtual void wi_fold (irange &r, tree type,
1959 const wide_int &lh_lb,
1960 const wide_int &lh_ub,
1961 const wide_int &rh_lb,
1962 const wide_int &rh_ub) const final override;
1963 virtual bool wi_op_overflows (wide_int &res, tree type,
1964 const wide_int &, const wide_int &)
1965 const final override;
// Divide W0 by W1 using the rounding mode of the tree code, reporting
// overflow; on undefined overflow clamp to the type maximum.
1969 operator_div::wi_op_overflows (wide_int &res, tree type,
1970 const wide_int &w0, const wide_int &w1) const
1975 wi::overflow_type overflow = wi::OVF_NONE;
1976 signop sign = TYPE_SIGN (type);
1980 case EXACT_DIV_EXPR:
1981 case TRUNC_DIV_EXPR:
1982 res = wi::div_trunc (w0, w1, sign, &overflow);
1984 case FLOOR_DIV_EXPR:
1985 res = wi::div_floor (w0, w1, sign, &overflow);
1987 case ROUND_DIV_EXPR:
1988 res = wi::div_round (w0, w1, sign, &overflow);
1991 res = wi::div_ceil (w0, w1, sign, &overflow);
1997 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
1999 // For division, the only case is -INF / -1 = +INF.
2000 res = wi::max_value (w0.get_precision (), sign);
// Fold the division, splitting the divisor range around zero so the
// cross product never divides by zero.
2007 operator_div::wi_fold (irange &r, tree type,
2008 const wide_int &lh_lb, const wide_int &lh_ub,
2009 const wide_int &rh_lb, const wide_int &rh_ub) const
2011 const wide_int dividend_min = lh_lb;
2012 const wide_int dividend_max = lh_ub;
2013 const wide_int divisor_min = rh_lb;
2014 const wide_int divisor_max = rh_ub;
2015 signop sign = TYPE_SIGN (type);
2016 unsigned prec = TYPE_PRECISION (type);
2017 wide_int extra_min, extra_max;
2019 // If we know we won't divide by zero, just do the division.
2020 if (!wi_includes_zero_p (type, divisor_min, divisor_max))
2022 wi_cross_product (r, type, dividend_min, dividend_max,
2023 divisor_min, divisor_max);
2027 // If we're definitely dividing by zero, there's nothing to do.
2028 if (wi_zero_p (type, divisor_min, divisor_max))
2034 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2035 // skip any division by zero.
2037 // First divide by the negative numbers, if any.
2038 if (wi::neg_p (divisor_min, sign))
2039 wi_cross_product (r, type, dividend_min, dividend_max,
2040 divisor_min, wi::minus_one (prec))
2044 // Then divide by the non-zero positive numbers, if any.
2045 if (wi::gt_p (divisor_max, wi::zero (prec), sign))
2048 wi_cross_product (tmp, type, dividend_min, dividend_max,
2049 wi::one (prec), divisor_max);
2052 // We shouldn't still have undefined here.
2053 gcc_checking_assert (!r.undefined_p ());
// EXACT_DIV_EXPR specializes operator_div: the division is known to
// have no remainder, which makes op1_range invertible via multiply.
2057 class operator_exact_divide : public operator_div
2059 using range_operator::op1_range;
2061 virtual bool op1_range (irange &r, tree type,
2064 relation_trio) const;
// Solve for op1 in LHS = op1 / op2 by multiplying back, valid because
// an exact divide has no remainder.
2069 operator_exact_divide::op1_range (irange &r, tree type,
2072 relation_trio) const
2074 if (lhs.undefined_p ())
2077 // [2, 4] = op1 / [3,3] since it's exact divide, no need to worry about
2078 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2079 // We won't bother trying to enumerate all the in between stuff :-P
2080 // TRUE accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of
2081 // the time however.
2082 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2083 if (op2.singleton_p (&offset)
2084 && !integer_zerop (offset))
2085 return range_op_handler (MULT_EXPR, type).fold_range (r, type, lhs, op2);
// Range operator for LSHIFT_EXPR, built on the cross-product
// machinery.
2090 class operator_lshift : public cross_product_operator
2092 using range_operator::fold_range;
2093 using range_operator::op1_range;
2095 virtual bool op1_range (irange &r, tree type,
2098 relation_trio rel = TRIO_VARYING) const;
2099 virtual bool fold_range (irange &r, tree type,
2102 relation_trio rel = TRIO_VARYING) const;
2104 virtual void wi_fold (irange &r, tree type,
2105 const wide_int &lh_lb, const wide_int &lh_ub,
2106 const wide_int &rh_lb, const wide_int &rh_ub) const;
2107 virtual bool wi_op_overflows (wide_int &res,
2110 const wide_int &) const;
// Range operator for RSHIFT_EXPR, built on the cross-product
// machinery; also provides an LHS/op1 relation.
2113 class operator_rshift : public cross_product_operator
2115 using range_operator::fold_range;
2116 using range_operator::op1_range;
2117 using range_operator::lhs_op1_relation;
2119 virtual bool fold_range (irange &r, tree type,
2122 relation_trio rel = TRIO_VARYING) const;
2123 virtual void wi_fold (irange &r, tree type,
2124 const wide_int &lh_lb,
2125 const wide_int &lh_ub,
2126 const wide_int &rh_lb,
2127 const wide_int &rh_ub) const;
2128 virtual bool wi_op_overflows (wide_int &res,
2131 const wide_int &w1) const;
2132 virtual bool op1_range (irange &, tree type,
2135 relation_trio rel = TRIO_VARYING) const;
2136 virtual relation_kind lhs_op1_relation (const irange &lhs,
2139 relation_kind rel) const;
// Right-shifting a non-negative value by a non-negative amount can
// never increase it, so report LHS <= op1 in that case.
2144 operator_rshift::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
2147 relation_kind) const
2149 // If both operands' ranges are >= 0, then the LHS <= op1.
2150 if (!op1.undefined_p () && !op2.undefined_p ()
2151 && wi::ge_p (op1.lower_bound (), 0, TYPE_SIGN (op1.type ()))
2152 && wi::ge_p (op2.lower_bound (), 0, TYPE_SIGN (op2.type ())))
2154 return VREL_VARYING;
// Fold a left shift.  A constant shift is handled as a wrapping
// multiply by 2^shift; otherwise defer to the generic folder over the
// sanitized shift range.
2158 operator_lshift::fold_range (irange &r, tree type,
2161 relation_trio rel) const
2163 int_range_max shift_range;
2164 if (!get_shift_range (shift_range, type, op2)
2166 if (op2.undefined_p ())
2169 r.set_varying (type);
2173 // Transform left shifts by constants into multiplies.
2174 if (shift_range.singleton_p ())
2176 unsigned shift = shift_range.lower_bound ().to_uhwi ();
2177 wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
2178 int_range<1> mult (type, tmp, tmp);
2180 // Force wrapping multiplication.
// Temporarily flip the wrapping flags so the multiply matches shift
// semantics, then restore them.
2181 bool saved_flag_wrapv = flag_wrapv;
2182 bool saved_flag_wrapv_pointer = flag_wrapv_pointer;
2184 flag_wrapv_pointer = 1;
2185 bool b = op_mult.fold_range (r, type, op1, mult);
2186 flag_wrapv = saved_flag_wrapv;
2187 flag_wrapv_pointer = saved_flag_wrapv_pointer;
2191 // Otherwise, invoke the generic fold routine.
2192 return range_operator::fold_range (r, type, op1, shift_range, rel);
// Fold a left shift over wide-int bounds, detecting whether any
// significant bits would be shifted out; falls back to the cross
// product or varying otherwise.
2196 operator_lshift::wi_fold (irange &r, tree type,
2197 const wide_int &lh_lb, const wide_int &lh_ub,
2198 const wide_int &rh_lb, const wide_int &rh_ub) const
2200 signop sign = TYPE_SIGN (type);
2201 unsigned prec = TYPE_PRECISION (type);
2202 int overflow_pos = sign == SIGNED ? prec - 1 : prec;
2203 int bound_shift = overflow_pos - rh_ub.to_shwi ();
2204 // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2205 // overflow. However, for that to happen, rh.max needs to be zero,
2206 // which means rh is a singleton range of zero, which means we simply return
2207 // [lh_lb, lh_ub] as the range.
2208 if (wi::eq_p (rh_ub, rh_lb) && wi::eq_p (rh_ub, 0))
2210 r = int_range<2> (type, lh_lb, lh_ub);
2214 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2215 wide_int complement = ~(bound - 1);
2216 wide_int low_bound, high_bound;
2217 bool in_bounds = false;
2219 if (sign == UNSIGNED)
2222 high_bound = complement;
2223 if (wi::ltu_p (lh_ub, low_bound))
2225 // [5, 6] << [1, 2] == [10, 24].
2226 // We're shifting out only zeroes, the value increases
2230 else if (wi::ltu_p (high_bound, lh_lb))
2232 // [0xffffff00, 0xffffffff] << [1, 2]
2233 // == [0xfffffc00, 0xfffffffe].
2234 // We're shifting out only ones, the value decreases
2241 // [-1, 1] << [1, 2] == [-4, 4]
2242 low_bound = complement;
2244 if (wi::lts_p (lh_ub, high_bound)
2245 && wi::lts_p (low_bound, lh_lb))
2247 // For non-negative numbers, we're shifting out only zeroes,
2248 // the value increases monotonically. For negative numbers,
2249 // we're shifting out only ones, the value decreases
2256 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2258 r.set_varying (type);
// Perform W0 << W1; a negative shift count is treated as a right
// shift of the magnitude.
2262 operator_lshift::wi_op_overflows (wide_int &res, tree type,
2263 const wide_int &w0, const wide_int &w1) const
2265 signop sign = TYPE_SIGN (type);
2268 // It's unclear from the C standard whether shifts can overflow.
2269 // The following code ignores overflow; perhaps a C standard
2270 // interpretation ruling is needed.
2271 res = wi::rshift (w0, -w1, sign);
2274 res = wi::lshift (w0, w1);
// Solve for op1 in LHS = op1 << op2 when the shift amount is a known
// singleton: right-shift the LHS back, then widen with the bit
// patterns that could have been shifted out.
2279 operator_lshift::op1_range (irange &r,
2283 relation_trio) const
2285 if (lhs.undefined_p ())
2289 if (!lhs.contains_p (build_zero_cst (type)))
2290 r.set_nonzero (type);
2292 r.set_varying (type);
2294 if (op2.singleton_p (&shift_amount))
2296 wide_int shift = wi::to_wide (shift_amount);
// Reject negative or over-wide (>= precision) shift counts.
2297 if (wi::lt_p (shift, 0, SIGNED))
2299 if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type),
2300 TYPE_PRECISION (op2.type ())),
2309 // Work completely in unsigned mode to start.
2311 int_range_max tmp_range;
2312 if (TYPE_SIGN (type) == SIGNED)
2314 int_range_max tmp = lhs;
2315 utype = unsigned_type_for (type);
2316 range_cast (tmp, utype);
2317 op_rshift.fold_range (tmp_range, utype, tmp, op2);
2320 op_rshift.fold_range (tmp_range, utype, lhs, op2);
2322 // Start with ranges which can produce the LHS by right shifting the
2323 // result by the shift amount.
2324 // ie [0x08, 0xF0] = op1 << 2 will start with
2325 // [00001000, 11110000] = op1 << 2
2326 // [0x02, 0x4C] aka [00000010, 00111100]
2328 // Then create a range from the LB with the least significant upper bit
2329 // set, to the upper bound with all the bits set.
2330 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2332 // Ideally we do this for each subrange, but just lump them all for now.
2333 unsigned low_bits = TYPE_PRECISION (utype)
2334 - TREE_INT_CST_LOW (shift_amount);
2335 wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype));
2336 wide_int new_ub = wi::bit_or (up_mask, tmp_range.upper_bound ());
2337 wide_int new_lb = wi::set_bit (tmp_range.lower_bound (), low_bits);
2338 int_range<2> fill_range (utype, new_lb, new_ub);
2339 tmp_range.union_ (fill_range);
// Convert the unsigned result back to the requested type and combine
// with the zero/nonzero knowledge established above.
2342 range_cast (tmp_range, type);
2344 r.intersect (tmp_range);
2348 return !r.varying_p ();
// Solve for op1 in LHS = op1 >> op2 when the shift amount is a known
// singleton: shift the LHS back left, then add the low bits that the
// right shift discarded.
2352 operator_rshift::op1_range (irange &r,
2356 relation_trio) const
2359 if (lhs.undefined_p ())
2361 if (op2.singleton_p (&shift))
2363 // Ignore nonsensical shifts.
2364 unsigned prec = TYPE_PRECISION (type);
2365 if (wi::ge_p (wi::to_wide (shift),
2366 wi::uhwi (prec, TYPE_PRECISION (TREE_TYPE (shift))),
2369 if (wi::to_wide (shift) == 0)
2375 // Folding the original operation may discard some impossible
2376 // ranges from the LHS.
2377 int_range_max lhs_refined;
2378 op_rshift.fold_range (lhs_refined, type, int_range<1> (type), op2);
2379 lhs_refined.intersect (lhs);
2380 if (lhs_refined.undefined_p ())
2385 int_range_max shift_range (shift, shift);
2386 int_range_max lb, ub;
2387 op_lshift.fold_range (lb, type, lhs_refined, shift_range);
2389 // 0000 0111 = OP1 >> 3
2391 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2392 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2393 // right hand side (0x07).
2394 tree mask = fold_build1 (BIT_NOT_EXPR, type,
2395 fold_build2 (LSHIFT_EXPR, type,
2396 build_minus_one_cst (type),
2398 int_range_max mask_range (build_zero_cst (type), mask);
2399 op_plus.fold_range (ub, type, lb, mask_range);
// If zero is impossible on the LHS, exclude the shifted-out-bits-only
// values as well.
2402 if (!lhs_refined.contains_p (build_zero_cst (type)))
2404 mask_range.invert ();
2405 r.intersect (mask_range);
// Perform W0 >> W1; a negative shift count is treated as a left shift
// of the magnitude.
2413 operator_rshift::wi_op_overflows (wide_int &res,
2416 const wide_int &w1) const
2418 signop sign = TYPE_SIGN (type);
2420 res = wi::lshift (w0, -w1);
2423 // It's unclear from the C standard whether shifts can overflow.
2424 // The following code ignores overflow; perhaps a C standard
2425 // interpretation ruling is needed.
2426 res = wi::rshift (w0, w1, sign);
// Fold a right shift: sanitize the shift range, then defer to the
// generic folder.
2432 operator_rshift::fold_range (irange &r, tree type,
2435 relation_trio rel) const
2437 int_range_max shift;
2438 if (!get_shift_range (shift, type, op2))
2440 if (op2.undefined_p ())
2443 r.set_varying (type);
2447 return range_operator::fold_range (r, type, op1, shift, rel);
// Right shift over wide-int bounds is handled entirely by the cross
// product of the four bound combinations.
2451 operator_rshift::wi_fold (irange &r, tree type,
2452 const wide_int &lh_lb, const wide_int &lh_ub,
2453 const wide_int &rh_lb, const wide_int &rh_ub) const
2455 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
// Range operator for type conversions (CONVERT_EXPR / NOP_EXPR).
2459 class operator_cast: public range_operator
2461 using range_operator::fold_range;
2462 using range_operator::op1_range;
2464 virtual bool fold_range (irange &r, tree type,
2467 relation_trio rel = TRIO_VARYING) const;
2468 virtual bool op1_range (irange &r, tree type,
2471 relation_trio rel = TRIO_VARYING) const;
2472 virtual relation_kind lhs_op1_relation (const irange &lhs,
2475 relation_kind) const;
2477 bool truncating_cast_p (const irange &inner, const irange &outer) const;
2478 bool inside_domain_p (const wide_int &min, const wide_int &max,
2479 const irange &outer) const;
2480 void fold_pair (irange &r, unsigned index, const irange &inner,
2481 const irange &outer) const;
2484 // Add a partial equivalence between the LHS and op1 for casts.
2487 operator_cast::lhs_op1_relation (const irange &lhs,
2489 const irange &op2 ATTRIBUTE_UNUSED,
2490 relation_kind) const
2492 if (lhs.undefined_p () || op1.undefined_p ())
2493 return VREL_VARYING;
2494 unsigned lhs_prec = TYPE_PRECISION (lhs.type ());
2495 unsigned op1_prec = TYPE_PRECISION (op1.type ());
2496 // If the result gets sign extended into a larger type check first if this
2497 // qualifies as a partial equivalence.
2498 if (TYPE_SIGN (op1.type ()) == SIGNED && lhs_prec > op1_prec)
2500 // If the result is sign extended, and the LHS is larger than op1,
2501 // check if op1's range can be negative as the sign extension will
2502 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2503 int_range<3> negs = range_negatives (op1.type ());
2504 negs.intersect (op1);
2505 if (!negs.undefined_p ())
2506 return VREL_VARYING;
// The partial equivalence covers the low bits common to both types.
2509 unsigned prec = MIN (lhs_prec, op1_prec);
2510 return bits_to_pe (prec);
2513 // Return TRUE if casting from INNER to OUTER is a truncating cast.
2516 operator_cast::truncating_cast_p (const irange &inner,
2517 const irange &outer) const
2519 return TYPE_PRECISION (outer.type ()) < TYPE_PRECISION (inner.type ());
2522 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
2525 operator_cast::inside_domain_p (const wide_int &min,
2526 const wide_int &max,
2527 const irange &range) const
2529 wide_int domain_min = wi::to_wide (vrp_val_min (range.type ()));
2530 wide_int domain_max = wi::to_wide (vrp_val_max (range.type ()));
2531 signop domain_sign = TYPE_SIGN (range.type ());
2532 return (wi::le_p (min, domain_max, domain_sign)
2533 && wi::le_p (max, domain_max, domain_sign)
2534 && wi::ge_p (min, domain_min, domain_sign)
2535 && wi::ge_p (max, domain_min, domain_sign));
2539 // Helper for fold_range which work on a pair at a time.
2542 operator_cast::fold_pair (irange &r, unsigned index,
2543 const irange &inner,
2544 const irange &outer) const
2546 tree inner_type = inner.type ();
2547 tree outer_type = outer.type ();
2548 signop inner_sign = TYPE_SIGN (inner_type);
2549 unsigned outer_prec = TYPE_PRECISION (outer_type);
2551 // Check to see if casting from INNER to OUTER is a conversion that
2552 // fits in the resulting OUTER type.
2553 wide_int inner_lb = inner.lower_bound (index);
2554 wide_int inner_ub = inner.upper_bound (index);
2555 if (truncating_cast_p (inner, outer))
2557 // We may be able to accommodate a truncating cast if the
2558 // resulting range can be represented in the target type...
2559 if (wi::rshift (wi::sub (inner_ub, inner_lb),
2560 wi::uhwi (outer_prec, TYPE_PRECISION (inner.type ())),
2563 r.set_varying (outer_type);
2567 // ...but we must still verify that the final range fits in the
2568 // domain. This catches -fstrict-enum restrictions where the domain
2569 // range is smaller than what fits in the underlying type.
2570 wide_int min = wide_int::from (inner_lb, outer_prec, inner_sign);
2571 wide_int max = wide_int::from (inner_ub, outer_prec, inner_sign);
2572 if (inside_domain_p (min, max, outer))
2573 create_possibly_reversed_range (r, outer_type, min, max);
2575 r.set_varying (outer_type);
// Fold the cast by converting each sub-range pair of INNER and
// unioning the results, then propagating the nonzero-bits mask when
// it survives the conversion.
2580 operator_cast::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
2581 const irange &inner,
2582 const irange &outer,
2583 relation_trio) const
2585 if (empty_range_varying (r, type, inner, outer))
2588 gcc_checking_assert (outer.varying_p ());
2589 gcc_checking_assert (inner.num_pairs () > 0);
2591 // Avoid a temporary by folding the first pair directly into the result.
2592 fold_pair (r, 0, inner, outer);
2594 // Then process any additional pairs by unioning with their results.
2595 for (unsigned x = 1; x < inner.num_pairs (); ++x)
2598 fold_pair (tmp, x, inner, outer);
2604 // Update the nonzero mask. Truncating casts are problematic unless
2605 // the conversion fits in the resulting outer type.
2606 wide_int nz = inner.get_nonzero_bits ();
2607 if (truncating_cast_p (inner, outer)
2608 && wi::rshift (nz, wi::uhwi (TYPE_PRECISION (outer.type ()),
2609 TYPE_PRECISION (inner.type ())),
2610 TYPE_SIGN (inner.type ())) != 0)
2612 nz = wide_int::from (nz, TYPE_PRECISION (type), TYPE_SIGN (inner.type ()));
2613 r.set_nonzero_bits (nz);
// Solve for op1 given LHS = (cast) op1.  Pointers are special-cased;
// truncating casts reconstruct both the positive (zero-extended) and
// negative (sign-extended) candidate values in the wider type.
2619 operator_cast::op1_range (irange &r, tree type,
2622 relation_trio) const
2624 if (lhs.undefined_p ())
2626 tree lhs_type = lhs.type ();
2627 gcc_checking_assert (types_compatible_p (op2.type(), type));
2629 // If we are calculating a pointer, shortcut to what we really care about.
2630 if (POINTER_TYPE_P (type))
2632 // Conversion from other pointers or a constant (including 0/NULL)
2633 // are straightforward.
2634 if (POINTER_TYPE_P (lhs.type ())
2635 || (lhs.singleton_p ()
2636 && TYPE_PRECISION (lhs.type ()) >= TYPE_PRECISION (type)))
2639 range_cast (r, type);
2643 // If the LHS is not a pointer nor a singleton, then it is
2644 // either VARYING or non-zero.
2645 if (!lhs.contains_p (build_zero_cst (lhs.type ())))
2646 r.set_nonzero (type);
2648 r.set_varying (type);
2654 if (truncating_cast_p (op2, lhs))
2656 if (lhs.varying_p ())
2657 r.set_varying (type);
2660 // We want to insert the LHS as an unsigned value since it
2661 // would not trigger the signed bit of the larger type.
2662 int_range_max converted_lhs = lhs;
2663 range_cast (converted_lhs, unsigned_type_for (lhs_type));
2664 range_cast (converted_lhs, type);
2665 // Start by building the positive signed outer range for the type.
2666 wide_int lim = wi::set_bit_in_zero (TYPE_PRECISION (lhs_type),
2667 TYPE_PRECISION (type));
2668 r = int_range<1> (type, lim, wi::max_value (TYPE_PRECISION (type),
2670 // For the signed part, we need to simply union the 2 ranges now.
2671 r.union_ (converted_lhs);
2673 // Create maximal negative number outside of LHS bits.
2674 lim = wi::mask (TYPE_PRECISION (lhs_type), true,
2675 TYPE_PRECISION (type));
2676 // Add this to the unsigned LHS range(s).
2677 int_range_max lim_range (type, lim, lim);
2678 int_range_max lhs_neg;
2679 range_op_handler (PLUS_EXPR, type).fold_range (lhs_neg, type,
2682 // lhs_neg now has all the negative versions of the LHS.
2683 // Now union in all the values from SIGNED MIN (0x80000) to
2684 // lim-1 in order to fill in all the ranges with the upper
2687 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
2688 // we don't need to create a range from min to lim-1
2689 // calculate neg range traps trying to create [lim, lim - 1].
2690 wide_int min_val = wi::min_value (TYPE_PRECISION (type), SIGNED);
2693 int_range_max neg (type,
2694 wi::min_value (TYPE_PRECISION (type),
2697 lhs_neg.union_ (neg);
2699 // And finally, munge the signed and unsigned portions.
2702 // And intersect with any known value passed in the extra operand.
// Non-truncating case: when precisions match the cast is a no-op for
// range purposes.
2708 if (TYPE_PRECISION (lhs_type) == TYPE_PRECISION (type))
2712 // The cast is not truncating, and the range is restricted to
2713 // the range of the RHS by this assignment.
2715 // Cast the range of the RHS to the type of the LHS.
2716 fold_range (tmp, lhs_type, int_range<1> (type), int_range<1> (lhs_type));
2717 // Intersect this with the LHS range will produce the range,
2718 // which will be cast to the RHS type before returning.
2719 tmp.intersect (lhs);
2722 // Cast the calculated range to the type of the RHS.
2723 fold_range (r, type, tmp, int_range<1> (type));
// Range operator for logical AND (TRUTH_AND_EXPR) on boolean ranges.
// NOTE(review): this chunk elides some original lines (braces,
// access specifiers, parameter lines); embedded numbers are the
// original file's line numbers.
2728 class operator_logical_and : public range_operator
2730 using range_operator::fold_range;
2731 using range_operator::op1_range;
2732 using range_operator::op2_range;
2734 virtual bool fold_range (irange &r, tree type,
2737 relation_trio rel = TRIO_VARYING) const;
2738 virtual bool op1_range (irange &r, tree type,
2741 relation_trio rel = TRIO_VARYING) const;
2742 virtual bool op2_range (irange &r, tree type,
2745 relation_trio rel = TRIO_VARYING) const;
2750 operator_logical_and::fold_range (irange &r, tree type,
2753 relation_trio) const
2755 if (empty_range_varying (r, type, lh, rh))
2758 // 0 && anything is 0.
2759 if ((wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (lh.upper_bound (), 0))
2760 || (wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (rh.upper_bound (), 0)))
2761 r = range_false (type);
2762 else if (lh.contains_p (build_zero_cst (lh.type ()))
2763 || rh.contains_p (build_zero_cst (rh.type ())))
2764 // To reach this point, there must be a logical 1 on each side, and
2765 // the only remaining question is whether there is a zero or not.
2766 r = range_true_and_false (type);
2768 r = range_true (type);
// Compute the range of OP1 from LHS = OP1 && OP2.
// A true LHS forces both operands true; any other state leaves OP1
// unconstrained ([0,1]) because only one side need be false.
2773 operator_logical_and::op1_range (irange &r, tree type,
2775 const irange &op2 ATTRIBUTE_UNUSED,
2776 relation_trio) const
2778 switch (get_bool_state (r, lhs, type))
2781 // A true result means both sides of the AND must be true.
2782 r = range_true (type);
2785 // Any other result means only one side has to be false, the
2786 // other side can be anything. So we cannot be sure of any
2788 r = range_true_and_false (type);
// AND is symmetric, so op2_range simply reuses op1_range with the
// operands swapped.
2795 operator_logical_and::op2_range (irange &r, tree type,
2798 relation_trio) const
2800 return operator_logical_and::op1_range (r, type, lhs, op1);
// Range operator for BIT_AND_EXPR.  Besides folding, it can derive
// operand ranges from a known result, and an AND with an all-ones
// low mask (0xff etc.) establishes a partial equivalence relation.
2804 class operator_bitwise_and : public range_operator
2806 using range_operator::op1_range;
2807 using range_operator::op2_range;
2809 virtual bool op1_range (irange &r, tree type,
2812 relation_trio rel = TRIO_VARYING) const;
2813 virtual bool op2_range (irange &r, tree type,
2816 relation_trio rel = TRIO_VARYING) const;
2817 virtual void wi_fold (irange &r, tree type,
2818 const wide_int &lh_lb,
2819 const wide_int &lh_ub,
2820 const wide_int &rh_lb,
2821 const wide_int &rh_ub) const;
2822 virtual relation_kind lhs_op1_relation (const irange &lhs,
2825 relation_kind) const;
// Helper: solve one LHS subrange of LHS = x & MASK for x.
2827 void simple_op1_range_solver (irange &r, tree type,
2829 const irange &op2) const;
2833 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
2834 // by considering the number of leading redundant sign bit copies.
2835 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
2836 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
2838 wi_optimize_signed_bitwise_op (irange &r, tree type,
2839 const wide_int &lh_lb, const wide_int &lh_ub,
2840 const wide_int &rh_lb, const wide_int &rh_ub)
// clrsb of a range is bounded by the clrsb of its endpoints.
2842 int lh_clrsb = MIN (wi::clrsb (lh_lb), wi::clrsb (lh_ub));
2843 int rh_clrsb = MIN (wi::clrsb (rh_lb), wi::clrsb (rh_ub));
2844 int new_clrsb = MIN (lh_clrsb, rh_clrsb);
// NOTE(review): an early bail-out when new_clrsb == 0 is elided in
// this chunk (original lines 2845-2846) — confirm against full source.
2847 int type_prec = TYPE_PRECISION (type);
2848 int rprec = (type_prec - new_clrsb) - 1;
// The result keeps at least new_clrsb redundant sign bits, so it fits
// in [sign-extended mask, low mask] of rprec bits.
2849 value_range_with_overflow (r, type,
2850 wi::mask (rprec, true, type_prec),
2851 wi::mask (rprec, false, type_prec));
2855 // An AND of 8,16, 32 or 64 bits can produce a partial equivalence between
// the LHS and op1 (the low N bits agree).
2859 operator_bitwise_and::lhs_op1_relation (const irange &lhs,
2862 relation_kind) const
2864 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
2865 return VREL_VARYING;
// Only a constant mask can establish a partial equivalence.
2866 if (!op2.singleton_p ())
2867 return VREL_VARYING;
2868 // if val == 0xff or 0xFFFF OR 0Xffffffff OR 0Xffffffffffffffff, return TRUE
2869 int prec1 = TYPE_PRECISION (op1.type ());
2870 int prec2 = TYPE_PRECISION (op2.type ());
2872 wide_int mask = op2.lower_bound ();
// NOTE(review): the mask_prec assignments between these tests are
// elided in this chunk; each branch presumably records 8/16/32/64.
2873 if (wi::eq_p (mask, wi::mask (8, false, prec2)))
2875 else if (wi::eq_p (mask, wi::mask (16, false, prec2)))
2877 else if (wi::eq_p (mask, wi::mask (32, false, prec2)))
2879 else if (wi::eq_p (mask, wi::mask (64, false, prec2)))
2881 return bits_to_pe (MIN (prec1, mask_prec));
2884 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
2885 // possible. Basically, see if we can optimize:
2889 // [LB op Z, UB op Z]
2891 // If the optimization was successful, accumulate the range in R and
// return true; otherwise leave R untouched and return false.
2895 wi_optimize_and_or (irange &r,
2896 enum tree_code code,
2898 const wide_int &lh_lb, const wide_int &lh_ub,
2899 const wide_int &rh_lb, const wide_int &rh_ub)
2901 // Calculate the singleton mask among the ranges, if any.
2902 wide_int lower_bound, upper_bound, mask;
2903 if (wi::eq_p (rh_lb, rh_ub))
2906 lower_bound = lh_lb;
2907 upper_bound = lh_ub;
2909 else if (wi::eq_p (lh_lb, lh_ub))
2912 lower_bound = rh_lb;
2913 upper_bound = rh_ub;
2918 // If Z is a constant which (for op | its bitwise not) has n
2919 // consecutive least significant bits cleared followed by m 1
2920 // consecutive bits set immediately above it and either
2921 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
2923 // The least significant n bits of all the values in the range are
2924 // cleared or set, the m bits above it are preserved and any bits
2925 // above these are required to be the same for all values in the
// range, so the bounds simply get the op applied to them.
2929 if (code == BIT_IOR_EXPR)
2931 if (wi::eq_p (w, 0))
2932 n = w.get_precision ();
2936 w = ~(w | wi::mask (n, false, w.get_precision ()));
2937 if (wi::eq_p (w, 0))
2938 m = w.get_precision () - n;
2940 m = wi::ctz (w) - n;
2942 wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
// Bits above m+n must agree between the two bounds or we give up.
2943 if ((new_mask & lower_bound) != (new_mask & upper_bound))
2946 wide_int res_lb, res_ub;
2947 if (code == BIT_AND_EXPR)
2949 res_lb = wi::bit_and (lower_bound, mask);
2950 res_ub = wi::bit_and (upper_bound, mask);
2952 else if (code == BIT_IOR_EXPR)
2954 res_lb = wi::bit_or (lower_bound, mask);
2955 res_ub = wi::bit_or (upper_bound, mask);
2959 value_range_with_overflow (r, type, res_lb, res_ub);
2961 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
2962 if (code == BIT_IOR_EXPR && wi::ne_p (mask, 0))
2965 tmp.set_nonzero (type);
2971 // For range [LB, UB] compute two wide_int bit masks.
2973 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
2974 // for all numbers in the range the bit is 0, otherwise it might be 0
// or 1.
2977 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
2978 // for all numbers in the range the bit is 1, otherwise it might be 0
// or 1.
2982 wi_set_zero_nonzero_bits (tree type,
2983 const wide_int &lb, const wide_int &ub,
2984 wide_int &maybe_nonzero,
2985 wide_int &mustbe_nonzero)
2987 signop sign = TYPE_SIGN (type);
// Singleton: every bit is exactly known.
2989 if (wi::eq_p (lb, ub))
2990 maybe_nonzero = mustbe_nonzero = lb;
// Same-sign range: bits above the highest differing bit are known.
2991 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
2993 wide_int xor_mask = lb ^ ub;
2994 maybe_nonzero = lb | ub;
2995 mustbe_nonzero = lb & ub;
// All bits at or below the highest differing bit are unknown.
2998 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
2999 maybe_nonzero.get_precision ());
3000 maybe_nonzero = maybe_nonzero | mask;
3001 mustbe_nonzero = wi::bit_and_not (mustbe_nonzero, mask);
// Range straddles zero: nothing is known about any bit.
3006 maybe_nonzero = wi::minus_one (lb.get_precision ());
3007 mustbe_nonzero = wi::zero (lb.get_precision ());
// Fold [LH_LB,LH_UB] & [RH_LB,RH_UB] into R using known-bit analysis.
3012 operator_bitwise_and::wi_fold (irange &r, tree type,
3013 const wide_int &lh_lb,
3014 const wide_int &lh_ub,
3015 const wide_int &rh_lb,
3016 const wide_int &rh_ub) const
// First try the exact singleton-mask shortcut.
3018 if (wi_optimize_and_or (r, BIT_AND_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3021 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3022 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3023 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3024 maybe_nonzero_lh, mustbe_nonzero_lh);
3025 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3026 maybe_nonzero_rh, mustbe_nonzero_rh);
// AND: a result bit is set only if both sides' bits can be/are set.
3028 wide_int new_lb = mustbe_nonzero_lh & mustbe_nonzero_rh;
3029 wide_int new_ub = maybe_nonzero_lh & maybe_nonzero_rh;
3030 signop sign = TYPE_SIGN (type);
3031 unsigned prec = TYPE_PRECISION (type);
3032 // If both input ranges contain only negative values, we can
3033 // truncate the result range maximum to the minimum of the
3034 // input range maxima.
3035 if (wi::lt_p (lh_ub, 0, sign) && wi::lt_p (rh_ub, 0, sign))
3037 new_ub = wi::min (new_ub, lh_ub, sign);
3038 new_ub = wi::min (new_ub, rh_ub, sign);
3040 // If either input range contains only non-negative values
3041 // we can truncate the result range maximum to the respective
3042 // maximum of the input range.
3043 if (wi::ge_p (lh_lb, 0, sign))
3044 new_ub = wi::min (new_ub, lh_ub, sign);
3045 if (wi::ge_p (rh_lb, 0, sign))
3046 new_ub = wi::min (new_ub, rh_ub, sign);
3047 // PR68217: In case of signed & sign-bit-CST should
3048 // result in [-INF, 0] instead of [-INF, INF].
3049 if (wi::gt_p (new_lb, new_ub, sign))
3051 wide_int sign_bit = wi::set_bit_in_zero (prec - 1, prec);
3053 && ((wi::eq_p (lh_lb, lh_ub)
3054 && !wi::cmps (lh_lb, sign_bit))
3055 || (wi::eq_p (rh_lb, rh_ub)
3056 && !wi::cmps (rh_lb, sign_bit))))
3058 new_lb = wi::min_value (prec, sign);
3059 new_ub = wi::zero (prec);
3062 // If the limits got swapped around, return varying.
// (For signed types, first try the redundant-sign-bit fallback.)
3063 if (wi::gt_p (new_lb, new_ub,sign))
3066 && wi_optimize_signed_bitwise_op (r, type,
3070 r.set_varying (type);
3073 value_range_with_overflow (r, type, new_lb, new_ub);
// If LHS cannot be zero, R is nonzero; otherwise nothing is known.
3077 set_nonzero_range_from_mask (irange &r, tree type, const irange &lhs)
3079 if (!lhs.contains_p (build_zero_cst (type)))
3080 r = range_nonzero (type);
3082 r.set_varying (type);
3085 // This was shamelessly stolen from register_edge_assert_for_2 and
3086 // adjusted to work with iranges.
// Solve R = x for one LHS subrange of LHS = x & MASK (op2).
3089 operator_bitwise_and::simple_op1_range_solver (irange &r, tree type,
3091 const irange &op2) const
// A non-constant mask: fall back to the nonzero-only deduction.
3093 if (!op2.singleton_p ())
3095 set_nonzero_range_from_mask (r, type, lhs);
3098 unsigned int nprec = TYPE_PRECISION (type);
3099 wide_int cst2v = op2.lower_bound ();
3100 bool cst2n = wi::neg_p (cst2v, TYPE_SIGN (type));
3103 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3105 sgnbit = wi::zero (nprec);
3107 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3109 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3110 // maximum unsigned value is ~0. For signed comparison, if CST2
3111 // doesn't have the most significant bit set, handle it similarly. If
3112 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3113 wide_int valv = lhs.lower_bound ();
3114 wide_int minv = valv & cst2v, maxv;
3115 bool we_know_nothing = false;
3118 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3119 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3122 // If we can't determine anything on this bound, fall
3123 // through and conservatively solve for the other end point.
3124 we_know_nothing = true;
3127 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3128 if (we_know_nothing)
3129 r.set_varying (type);
3131 r = int_range<1> (type, minv, maxv);
3133 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3135 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3136 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3138 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3140 // For signed comparison, if CST2 doesn't have most significant bit
3141 // set, handle it similarly. If CST2 has MSB set, the maximum is
3142 // the same and minimum is INT_MIN.
3143 valv = lhs.upper_bound ();
3144 minv = valv & cst2v;
3149 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3152 // If we couldn't determine anything on either bound, return
// varying; otherwise intersect with the upper-bound solution.
3154 if (we_know_nothing)
3162 int_range<1> upper_bits (type, minv, maxv);
3163 r.intersect (upper_bits);
// Compute the range of OP1 from LHS = OP1 & OP2, solving each LHS
// subrange independently and unioning the results.
3167 operator_bitwise_and::op1_range (irange &r, tree type,
3170 relation_trio) const
3172 if (lhs.undefined_p ())
// Boolean AND is really logical AND.
3174 if (types_compatible_p (type, boolean_type_node))
3175 return op_logical_and.op1_range (r, type, lhs, op2);
3178 for (unsigned i = 0; i < lhs.num_pairs (); ++i)
3180 int_range_max chunk (lhs.type (),
3181 lhs.lower_bound (i),
3182 lhs.upper_bound (i))
3184 simple_op1_range_solver (res, type, chunk, op2);
3187 if (r.undefined_p ())
3188 set_nonzero_range_from_mask (r, type, lhs);
3190 // For 0 = op1 & MASK, op1 is ~MASK.
3191 if (lhs.zero_p () && op2.singleton_p ())
3193 wide_int nz = wi::bit_not (op2.get_nonzero_bits ());
3194 int_range<2> tmp (type);
3195 tmp.set_nonzero_bits (nz);
// AND is symmetric, so op2_range reuses op1_range with operands swapped.
3202 operator_bitwise_and::op2_range (irange &r, tree type,
3205 relation_trio) const
3207 return operator_bitwise_and::op1_range (r, type, lhs, op1);
// Range operator for logical OR (TRUTH_OR_EXPR) on boolean ranges.
3211 class operator_logical_or : public range_operator
3213 using range_operator::fold_range;
3214 using range_operator::op1_range;
3215 using range_operator::op2_range;
3217 virtual bool fold_range (irange &r, tree type,
3220 relation_trio rel = TRIO_VARYING) const;
3221 virtual bool op1_range (irange &r, tree type,
3224 relation_trio rel = TRIO_VARYING) const;
3228 virtual bool op2_range (irange &r, tree type,
3228 relation_trio rel = TRIO_VARYING) const;
// Fold OP1 || OP2.  NOTE(review): the body past the empty/varying
// check is elided in this chunk.
3232 operator_logical_or::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3235 relation_trio) const
3237 if (empty_range_varying (r, type, lh, rh))
// Compute OP1 from LHS = OP1 || OP2: a false LHS forces both sides
// false; anything else leaves OP1 as [0,1].
3246 operator_logical_or::op1_range (irange &r, tree type,
3248 const irange &op2 ATTRIBUTE_UNUSED,
3249 relation_trio) const
3251 switch (get_bool_state (r, lhs, type))
3254 // A false result means both sides of the OR must be false.
3255 r = range_false (type);
3258 // Any other result means only one side has to be true, the
3259 // other side can be anything. so we can't be sure of any result
3261 r = range_true_and_false (type);
// OR is symmetric, so op2_range reuses op1_range.
3268 operator_logical_or::op2_range (irange &r, tree type,
3271 relation_trio) const
3273 return operator_logical_or::op1_range (r, type, lhs, op1);
// Range operator for BIT_IOR_EXPR.
3277 class operator_bitwise_or : public range_operator
3279 using range_operator::op1_range;
3280 using range_operator::op2_range;
3282 virtual bool op1_range (irange &r, tree type,
3285 relation_trio rel = TRIO_VARYING) const;
3286 virtual bool op2_range (irange &r, tree type,
3289 relation_trio rel = TRIO_VARYING) const;
3290 virtual void wi_fold (irange &r, tree type,
3291 const wide_int &lh_lb,
3292 const wide_int &lh_ub,
3293 const wide_int &rh_lb,
3294 const wide_int &rh_ub) const;
// Fold [LH_LB,LH_UB] | [RH_LB,RH_UB] into R using known-bit analysis.
3298 operator_bitwise_or::wi_fold (irange &r, tree type,
3299 const wide_int &lh_lb,
3300 const wide_int &lh_ub,
3301 const wide_int &rh_lb,
3302 const wide_int &rh_ub) const
// First try the exact singleton-mask shortcut.
3304 if (wi_optimize_and_or (r, BIT_IOR_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3307 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3308 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3309 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3310 maybe_nonzero_lh, mustbe_nonzero_lh);
3311 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3312 maybe_nonzero_rh, mustbe_nonzero_rh);
// IOR: a result bit is set if either side's bit is/can be set.
3313 wide_int new_lb = mustbe_nonzero_lh | mustbe_nonzero_rh;
3314 wide_int new_ub = maybe_nonzero_lh | maybe_nonzero_rh;
3315 signop sign = TYPE_SIGN (type);
3316 // If the input ranges contain only positive values we can
3317 // truncate the minimum of the result range to the maximum
3318 // of the input range minima.
3319 if (wi::ge_p (lh_lb, 0, sign)
3320 && wi::ge_p (rh_lb, 0, sign))
3322 new_lb = wi::max (new_lb, lh_lb, sign);
3323 new_lb = wi::max (new_lb, rh_lb, sign);
3325 // If either input range contains only negative values
3326 // we can truncate the minimum of the result range to the
3327 // respective minimum range.
3328 if (wi::lt_p (lh_ub, 0, sign))
3329 new_lb = wi::max (new_lb, lh_lb, sign);
3330 if (wi::lt_p (rh_ub, 0, sign))
3331 new_lb = wi::max (new_lb, rh_lb, sign);
3332 // If the limits got swapped around, return a conservative range.
3333 if (wi::gt_p (new_lb, new_ub, sign))
3335 // Make sure that nonzero|X is nonzero.
3336 if (wi::gt_p (lh_lb, 0, sign)
3337 || wi::gt_p (rh_lb, 0, sign)
3338 || wi::lt_p (lh_ub, 0, sign)
3339 || wi::lt_p (rh_ub, 0, sign))
3340 r.set_nonzero (type);
3341 else if (sign == SIGNED
3342 && wi_optimize_signed_bitwise_op (r, type,
3347 r.set_varying (type);
3350 value_range_with_overflow (r, type, new_lb, new_ub);
// Compute OP1 from LHS = OP1 | OP2: a zero LHS forces OP1 to zero;
// otherwise nothing more is known.
3354 operator_bitwise_or::op1_range (irange &r, tree type,
3357 relation_trio) const
3359 if (lhs.undefined_p ())
3361 // If this is really a logical wi_fold, call that.
3362 if (types_compatible_p (type, boolean_type_node))
3363 return op_logical_or.op1_range (r, type, lhs, op2);
3367 tree zero = build_zero_cst (type);
3368 r = int_range<1> (zero, zero);
3371 r.set_varying (type);
// IOR is symmetric, so op2_range reuses op1_range.
3376 operator_bitwise_or::op2_range (irange &r, tree type,
3379 relation_trio) const
3381 return operator_bitwise_or::op1_range (r, type, lhs, op1);
// Range operator for BIT_XOR_EXPR.  Also refines the LHS using any
// known equality/inequality relation between the operands.
3385 class operator_bitwise_xor : public range_operator
3387 using range_operator::op1_range;
3388 using range_operator::op2_range;
3390 virtual void wi_fold (irange &r, tree type,
3391 const wide_int &lh_lb,
3392 const wide_int &lh_ub,
3393 const wide_int &rh_lb,
3394 const wide_int &rh_ub) const;
3395 virtual bool op1_range (irange &r, tree type,
3398 relation_trio rel = TRIO_VARYING) const;
3399 virtual bool op2_range (irange &r, tree type,
3402 relation_trio rel = TRIO_VARYING) const;
3403 virtual bool op1_op2_relation_effect (irange &lhs_range,
3405 const irange &op1_range,
3406 const irange &op2_range,
3407 relation_kind rel) const;
// Fold [LH_LB,LH_UB] ^ [RH_LB,RH_UB] into R using known-bit analysis.
3411 operator_bitwise_xor::wi_fold (irange &r, tree type,
3412 const wide_int &lh_lb,
3413 const wide_int &lh_ub,
3414 const wide_int &rh_lb,
3415 const wide_int &rh_ub) const
3417 signop sign = TYPE_SIGN (type);
3418 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3419 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3420 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3421 maybe_nonzero_lh, mustbe_nonzero_lh);
3422 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3423 maybe_nonzero_rh, mustbe_nonzero_rh);
// A result bit is zero if both must be one or both must be zero;
// it is one if exactly one side must be one and the other must be zero.
3425 wide_int result_zero_bits = ((mustbe_nonzero_lh & mustbe_nonzero_rh)
3426 | ~(maybe_nonzero_lh | maybe_nonzero_rh));
3427 wide_int result_one_bits
3428 = (wi::bit_and_not (mustbe_nonzero_lh, maybe_nonzero_rh)
3429 | wi::bit_and_not (mustbe_nonzero_rh, maybe_nonzero_lh));
3430 wide_int new_ub = ~result_zero_bits;
3431 wide_int new_lb = result_one_bits;
3433 // If the range has all positive or all negative values, the result
3434 // is better than VARYING.
// (new_lb < 0 means the sign bit must be one; new_ub >= 0 means it
// must be zero — in either case [new_lb, new_ub] is well-ordered.)
3435 if (wi::lt_p (new_lb, 0, sign) || wi::ge_p (new_ub, 0, sign))
3436 value_range_with_overflow (r, type, new_lb, new_ub)
3437 else if (sign == SIGNED
3438 && wi_optimize_signed_bitwise_op (r, type,
3443 r.set_varying (type);
3445 /* Furthermore, XOR is non-zero if its arguments can't be equal.  */
3446 if (wi::lt_p (lh_ub, rh_lb, sign)
3447 || wi::lt_p (rh_ub, lh_lb, sign)
3448 || wi::ne_p (result_one_bits, 0))
3451 tmp.set_nonzero (type);
// Refine LHS_RANGE of OP1 ^ OP2 given a relation between the operands:
// equal operands XOR to zero; unequal operands XOR to nonzero.
3457 operator_bitwise_xor::op1_op2_relation_effect (irange &lhs_range,
3461 relation_kind rel) const
3463 if (rel == VREL_VARYING)
3466 int_range<2> rel_range;
3471 rel_range.set_zero (type);
3474 rel_range.set_nonzero (type);
3480 lhs_range.intersect (rel_range);
// Compute OP1 from LHS = OP1 ^ OP2.  Only the boolean case is
// resolved precisely here; otherwise the result is varying.
3485 operator_bitwise_xor::op1_range (irange &r, tree type,
3488 relation_trio) const
3490 if (lhs.undefined_p () || lhs.varying_p ())
3495 if (types_compatible_p (type, boolean_type_node))
3497 switch (get_bool_state (r, lhs, type))
3500 if (op2.varying_p ())
3501 r.set_varying (type);
3502 else if (op2.zero_p ())
3503 r = range_true (type);
3505 r = range_false (type);
3515 r.set_varying (type);
// XOR is symmetric, so op2_range reuses op1_range.
3520 operator_bitwise_xor::op2_range (irange &r, tree type,
3523 relation_trio) const
3525 return operator_bitwise_xor::op1_range (r, type, lhs, op1);
// Range operator for TRUNC_MOD_EXPR.
3528 class operator_trunc_mod : public range_operator
3530 using range_operator::op1_range;
3531 using range_operator::op2_range;
3533 virtual void wi_fold (irange &r, tree type,
3534 const wide_int &lh_lb,
3535 const wide_int &lh_ub,
3536 const wide_int &rh_lb,
3537 const wide_int &rh_ub) const;
3538 virtual bool op1_range (irange &r, tree type,
3541 relation_trio) const;
3542 virtual bool op2_range (irange &r, tree type,
3545 relation_trio) const;
// Fold [LH_LB,LH_UB] % [RH_LB,RH_UB] into R.
3549 operator_trunc_mod::wi_fold (irange &r, tree type,
3550 const wide_int &lh_lb,
3551 const wide_int &lh_ub,
3552 const wide_int &rh_lb,
3553 const wide_int &rh_ub) const
3555 wide_int new_lb, new_ub, tmp;
3556 signop sign = TYPE_SIGN (type);
3557 unsigned prec = TYPE_PRECISION (type);
3559 // Mod 0 is undefined.
3560 if (wi_zero_p (type, rh_lb, rh_ub))
3566 // Check for constant and try to fold.
3567 if (lh_lb == lh_ub && rh_lb == rh_ub)
3569 wi::overflow_type ov = wi::OVF_NONE;
3570 tmp = wi::mod_trunc (lh_lb, rh_lb, sign, &ov);
3571 if (ov == wi::OVF_NONE)
3573 r = int_range<2> (type, tmp, tmp);
3578 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
3583 new_ub = wi::smax (new_ub, tmp);
// The sign of the result follows the sign of A (truncating division).
3586 if (sign == UNSIGNED)
3587 new_lb = wi::zero (prec);
3592 if (wi::gts_p (tmp, 0))
3593 tmp = wi::zero (prec);
3594 new_lb = wi::smax (new_lb, tmp);
// Clamp the upper bound by A itself when A is non-negative.
3597 if (sign == SIGNED && wi::neg_p (tmp))
3598 tmp = wi::zero (prec);
3599 new_ub = wi::min (new_ub, tmp, sign);
3601 value_range_with_overflow (r, type, new_lb, new_ub);
// Compute OP1 from LHS = OP1 % OP2 using the sign of the result.
3605 operator_trunc_mod::op1_range (irange &r, tree type,
3608 relation_trio) const
3610 if (lhs.undefined_p ())
3613 signop sign = TYPE_SIGN (type);
3614 unsigned prec = TYPE_PRECISION (type);
3615 // (a % b) >= x && x > 0 , then a >= x.
3616 if (wi::gt_p (lhs.lower_bound (), 0, sign))
3618 r = value_range (type, lhs.lower_bound (), wi::max_value (prec, sign));
3621 // (a % b) <= x && x < 0 , then a <= x.
3622 if (wi::lt_p (lhs.upper_bound (), 0, sign))
3624 r = value_range (type, wi::min_value (prec, sign), lhs.upper_bound ());
// Compute OP2 from LHS = OP1 % OP2; |b| must exceed |a % b|.
3631 operator_trunc_mod::op2_range (irange &r, tree type,
3634 relation_trio) const
3636 if (lhs.undefined_p ())
3639 signop sign = TYPE_SIGN (type);
3640 unsigned prec = TYPE_PRECISION (type);
3641 // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
3642 // or b > x for unsigned.
3643 if (wi::gt_p (lhs.lower_bound (), 0, sign))
3646 r = value_range (type, wi::neg (lhs.lower_bound ()),
3647 lhs.lower_bound (), VR_ANTI_RANGE);
3648 else if (wi::lt_p (lhs.lower_bound (), wi::max_value (prec, sign),
3650 r = value_range (type, lhs.lower_bound () + 1,
3651 wi::max_value (prec, sign));
3656 // (a % b) <= x && x < 0 , then b is in ~[x, -x].
3657 if (wi::lt_p (lhs.upper_bound (), 0, sign))
3659 if (wi::gt_p (lhs.upper_bound (), wi::min_value (prec, sign), sign))
3660 r = value_range (type, lhs.upper_bound (),
3661 wi::neg (lhs.upper_bound ()), VR_ANTI_RANGE);
// Range operator for logical NOT (TRUTH_NOT_EXPR).
3670 class operator_logical_not : public range_operator
3672 using range_operator::fold_range;
3673 using range_operator::op1_range;
3675 virtual bool fold_range (irange &r, tree type,
3678 relation_trio rel = TRIO_VARYING) const;
3679 virtual bool op1_range (irange &r, tree type,
3682 relation_trio rel = TRIO_VARYING) const;
3685 // Folding a logical NOT, oddly enough, involves doing nothing on the
3686 // forward pass through. During the initial walk backwards, the
3687 // logical NOT reversed the desired outcome on the way back, so on the
3688 // way forward all we do is pass the range forward.
3693 // to determine the TRUE branch, walking backward
3694 // if (b_3) if ([1,1])
3695 // b_3 = !b_2 [1,1] = ![0,0]
3696 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
3697 // which is the result we are looking for.. so.. pass it through.
3700 operator_logical_not::fold_range (irange &r, tree type,
3702 const irange &rh ATTRIBUTE_UNUSED,
3703 relation_trio) const
3705 if (empty_range_varying (r, type, lh, rh))
3709 if (!lh.varying_p () && !lh.undefined_p ())
3716 operator_logical_not::op1_range (irange &r,
3720 relation_trio) const
3722 // Logical NOT is involutary...do it again.
3723 return fold_range (r, type, lhs, op2);
// Range operator for BIT_NOT_EXPR, implemented as -1 - X.
3727 class operator_bitwise_not : public range_operator
3729 using range_operator::fold_range;
3730 using range_operator::op1_range;
3732 virtual bool fold_range (irange &r, tree type,
3735 relation_trio rel = TRIO_VARYING) const;
3736 virtual bool op1_range (irange &r, tree type,
3739 relation_trio rel = TRIO_VARYING) const;
3743 operator_bitwise_not::fold_range (irange &r, tree type,
3746 relation_trio) const
3748 if (empty_range_varying (r, type, lh, rh))
// Boolean bitwise NOT is really logical NOT.
3751 if (types_compatible_p (type, boolean_type_node))
3752 return op_logical_not.fold_range (r, type, lh, rh);
3754 // ~X is simply -1 - X.
3755 int_range<1> minusone (type, wi::minus_one (TYPE_PRECISION (type)),
3756 wi::minus_one (TYPE_PRECISION (type)));
3757 return range_op_handler (MINUS_EXPR, type).fold_range (r, type, minusone, lh);
3761 operator_bitwise_not::op1_range (irange &r, tree type,
3764 relation_trio) const
3766 if (lhs.undefined_p ())
3768 if (types_compatible_p (type, boolean_type_node))
3769 return op_logical_not.op1_range (r, type, lhs, op2);
3771 // ~X is -1 - X and since bitwise NOT is involutary...do it again.
3772 return fold_range (r, type, lhs, op2);
// Range operator for INTEGER_CST: folding just passes the constant's
// range through.
3776 class operator_cst : public range_operator
3778 using range_operator::fold_range;
3780 virtual bool fold_range (irange &r, tree type,
3783 relation_trio rel = TRIO_VARYING) const;
3787 operator_cst::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3789 const irange &rh ATTRIBUTE_UNUSED,
3790 relation_trio) const
// Range operator for SSA_NAME copies: the LHS is the operand.
3797 class operator_identity : public range_operator
3799 using range_operator::fold_range;
3800 using range_operator::op1_range;
3801 using range_operator::lhs_op1_relation;
3803 virtual bool fold_range (irange &r, tree type,
3806 relation_trio rel = TRIO_VARYING) const;
3807 virtual bool op1_range (irange &r, tree type,
3810 relation_trio rel = TRIO_VARYING) const;
3811 virtual relation_kind lhs_op1_relation (const irange &lhs,
3814 relation_kind rel) const;
3817 // Determine if there is a relationship between LHS and OP1.
3820 operator_identity::lhs_op1_relation (const irange &lhs,
3821 const irange &op1 ATTRIBUTE_UNUSED,
3822 const irange &op2 ATTRIBUTE_UNUSED,
3823 relation_kind) const
3825 if (lhs.undefined_p ())
3826 return VREL_VARYING;
3827 // Simply a copy, so they are equivalent.
3832 operator_identity::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3834 const irange &rh ATTRIBUTE_UNUSED,
3835 relation_trio) const
// A copy is invertible: the operand's range equals the LHS range.
3842 operator_identity::op1_range (irange &r, tree type ATTRIBUTE_UNUSED,
3844 const irange &op2 ATTRIBUTE_UNUSED,
3845 relation_trio) const
// Fallback operator for codes we know nothing about: always VARYING.
3852 class operator_unknown : public range_operator
3854 using range_operator::fold_range;
3856 virtual bool fold_range (irange &r, tree type,
3859 relation_trio rel = TRIO_VARYING) const;
3863 operator_unknown::fold_range (irange &r, tree type,
3864 const irange &lh ATTRIBUTE_UNUSED,
3865 const irange &rh ATTRIBUTE_UNUSED,
3866 relation_trio) const
3868 r.set_varying (type);
// Range operator for ABS_EXPR (signed absolute value).
3873 class operator_abs : public range_operator
3875 using range_operator::op1_range;
3877 virtual void wi_fold (irange &r, tree type,
3878 const wide_int &lh_lb,
3879 const wide_int &lh_ub,
3880 const wide_int &rh_lb,
3881 const wide_int &rh_ub) const;
3882 virtual bool op1_range (irange &r, tree type,
3885 relation_trio) const;
// Fold ABS ([LH_LB, LH_UB]) into R.
3889 operator_abs::wi_fold (irange &r, tree type,
3890 const wide_int &lh_lb, const wide_int &lh_ub,
3891 const wide_int &rh_lb ATTRIBUTE_UNUSED,
3892 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
3895 signop sign = TYPE_SIGN (type);
3896 unsigned prec = TYPE_PRECISION (type);
3898 // Pass through LH for the easy cases.
3899 if (sign == UNSIGNED || wi::ge_p (lh_lb, 0, sign))
3901 r = int_range<1> (type, lh_lb, lh_ub);
3905 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
// a useful range from ABS of a range containing TYPE_MIN_VALUE.
3907 wide_int min_value = wi::min_value (prec, sign);
3908 wide_int max_value = wi::max_value (prec, sign);
3909 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lh_lb, min_value))
3911 r.set_varying (type);
3915 // ABS_EXPR may flip the range around, if the original range
3916 // included negative values.
3917 if (wi::eq_p (lh_lb, min_value))
3919 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
3920 // returned [-MIN,-MIN] so this preserves that behaviour. PR37078
3921 if (wi::eq_p (lh_ub, min_value))
3923 r = int_range<1> (type, min_value, min_value);
3929 min = wi::abs (lh_lb);
3931 if (wi::eq_p (lh_ub, min_value))
3934 max = wi::abs (lh_ub);
3936 // If the range contains zero then we know that the minimum value in the
3937 // range will be zero.
3938 if (wi::le_p (lh_lb, 0, sign) && wi::ge_p (lh_ub, 0, sign))
3940 if (wi::gt_p (min, max, sign))
3942 min = wi::zero (prec);
3946 // If the range was reversed, swap MIN and MAX.
3947 if (wi::gt_p (min, max, sign))
3948 std::swap (min, max);
3951 // If the new range has its limits swapped around (MIN > MAX), then
3952 // the operation caused one of them to wrap around. The only thing
3953 // we know is that the result is positive.
3954 if (wi::gt_p (min, max, sign))
3956 min = wi::zero (prec);
3959 r = int_range<1> (type, min, max);
// Compute OP1 from LHS = ABS (OP1): each positive subrange [a,b] of
// LHS contributes both [a,b] and [-b,-a] to OP1.
3963 operator_abs::op1_range (irange &r, tree type,
3966 relation_trio) const
3968 if (empty_range_varying (r, type, lhs, op2))
3970 if (TYPE_UNSIGNED (type))
3975 // Start with the positives because negatives are an impossible result.
3976 int_range_max positives = range_positives (type);
3977 positives.intersect (lhs);
3979 // Then add the negative of each pair:
3980 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
3981 for (unsigned i = 0; i < positives.num_pairs (); ++i)
3982 r.union_ (int_range<1> (type,
3983 -positives.upper_bound (i),
3984 -positives.lower_bound (i)));
3985 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
3986 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
3987 wide_int min_value = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
3988 wide_int lb = lhs.lower_bound ();
3989 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lb, min_value))
3990 r.union_ (int_range<2> (type, lb, lb));
// Range operator for ABSU_EXPR: absolute value yielding an unsigned
// result type.
3995 class operator_absu : public range_operator
3998 virtual void wi_fold (irange &r, tree type,
3999 const wide_int &lh_lb, const wide_int &lh_ub,
4000 const wide_int &rh_lb, const wide_int &rh_ub) const;
4004 operator_absu::wi_fold (irange &r, tree type,
4005 const wide_int &lh_lb, const wide_int &lh_ub,
4006 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4007 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4009 wide_int new_lb, new_ub;
4011 // Pass through VR0 the easy cases.
4012 if (wi::ges_p (lh_lb, 0))
4019 new_lb = wi::abs (lh_lb);
4020 new_ub = wi::abs (lh_ub);
4022 // If the range contains zero then we know that the minimum
4023 // value in the range will be zero.
4024 if (wi::ges_p (lh_ub, 0))
4026 if (wi::gtu_p (new_lb, new_ub))
4028 new_lb = wi::zero (TYPE_PRECISION (type));
// An all-negative input range maps to a reversed positive range.
4031 std::swap (new_lb, new_ub);
4034 gcc_checking_assert (TYPE_UNSIGNED (type));
4035 r = int_range<1> (type, new_lb, new_ub);
// Range operator for NEGATE_EXPR, implemented as 0 - X.
4039 class operator_negate : public range_operator
4041 using range_operator::fold_range;
4042 using range_operator::op1_range;
4044 virtual bool fold_range (irange &r, tree type,
4047 relation_trio rel = TRIO_VARYING) const;
4048 virtual bool op1_range (irange &r, tree type,
4051 relation_trio rel = TRIO_VARYING) const;
4055 operator_negate::fold_range (irange &r, tree type,
4058 relation_trio) const
4060 if (empty_range_varying (r, type, lh, rh))
4062 // -X is simply 0 - X.
4063 return range_op_handler (MINUS_EXPR, type).fold_range (r, type,
4064 range_zero (type), lh);
4068 operator_negate::op1_range (irange &r, tree type,
4071 relation_trio) const
4073 // NEGATE is involutory.
4074 return fold_range (r, type, lhs, op2);
// Range operator for ADDR_EXPR: only nullness of the result is tracked.
4078 class operator_addr_expr : public range_operator
4080 using range_operator::fold_range;
4081 using range_operator::op1_range;
4083 virtual bool fold_range (irange &r, tree type,
4086 relation_trio rel = TRIO_VARYING) const;
4087 virtual bool op1_range (irange &r, tree type,
4090 relation_trio rel = TRIO_VARYING) const;
4094 operator_addr_expr::fold_range (irange &r, tree type,
4097 relation_trio) const
4099 if (empty_range_varying (r, type, lh, rh))
4102 // Return a non-null pointer of the LHS type (passed in op2).
4104 r = range_zero (type);
4105 else if (!lh.contains_p (build_zero_cst (lh.type ())))
4106 r = range_nonzero (type);
4108 r.set_varying (type);
// Nullness propagates backwards unchanged, so reuse fold_range.
4113 operator_addr_expr::op1_range (irange &r, tree type,
4116 relation_trio) const
4118 return operator_addr_expr::fold_range (r, type, lhs, op2);
// Range operator for POINTER_PLUS_EXPR: only nullness is tracked.
4122 class pointer_plus_operator : public range_operator
4125 virtual void wi_fold (irange &r, tree type,
4126 const wide_int &lh_lb,
4127 const wide_int &lh_ub,
4128 const wide_int &rh_lb,
4129 const wide_int &rh_ub) const;
4133 pointer_plus_operator::wi_fold (irange &r, tree type,
4134 const wide_int &lh_lb,
4135 const wide_int &lh_ub,
4136 const wide_int &rh_lb,
4137 const wide_int &rh_ub) const
4139 // Check for [0,0] + const, and simply return the const.
4140 if (lh_lb == 0 && lh_ub == 0 && rh_lb == rh_ub)
4142 tree val = wide_int_to_tree (type, rh_lb);
4147 // For pointer types, we are really only interested in asserting
4148 // whether the expression evaluates to non-NULL.
4150 // With -fno-delete-null-pointer-checks we need to be more
4151 // conservative. As some object might reside at address 0,
4152 // then some offset could be added to it and the same offset
4153 // subtracted again and the result would be NULL.
4155 // static int a[12]; where &a[0] is NULL and
4158 // ptr will be NULL here, even when there is POINTER_PLUS_EXPR
4159 // where the first range doesn't include zero and the second one
4160 // doesn't either. As the second operand is sizetype (unsigned),
4161 // consider all ranges where the MSB could be set as possible
4162 // subtractions where the result might be NULL.
4163 if ((!wi_includes_zero_p (type, lh_lb, lh_ub)
4164 || !wi_includes_zero_p (type, rh_lb, rh_ub))
4165 && !TYPE_OVERFLOW_WRAPS (type)
4166 && (flag_delete_null_pointer_checks
4167 || !wi::sign_mask (rh_ub)))
4168 r = range_nonzero (type);
4169 else if (lh_lb == lh_ub && lh_lb == 0
4170 && rh_lb == rh_ub && rh_lb == 0)
4171 r = range_zero (type);
4173 r.set_varying (type);
// Range operator for MIN_EXPR/MAX_EXPR on pointers; only nullness of
// the result is tracked (see wi_fold).
class pointer_min_max_operator : public range_operator
  virtual void wi_fold (irange & r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb, const wide_int &rh_ub) const;
// Fold MIN/MAX of two pointer ranges into R, tracking nullness only.
pointer_min_max_operator::wi_fold (irange &r, tree type,
				   const wide_int &lh_lb,
				   const wide_int &lh_ub,
				   const wide_int &rh_lb,
				   const wide_int &rh_ub) const
  // For MIN/MAX expressions with pointers, we only care about
  // nullness.  If both are non null, then the result is nonnull.
  // If both are null, then the result is null.  Otherwise they
  if (!wi_includes_zero_p (type, lh_lb, lh_ub)
      && !wi_includes_zero_p (type, rh_lb, rh_ub))
    r = range_nonzero (type);
  else if (wi_zero_p (type, lh_lb, lh_ub) && wi_zero_p (type, rh_lb, rh_ub))
    r = range_zero (type);
    r.set_varying (type);
// Range operator for BIT_AND_EXPR on pointers; only nullness of the
// result is tracked (see wi_fold).
class pointer_and_operator : public range_operator
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb, const wide_int &rh_ub) const;
4215 pointer_and_operator::wi_fold (irange &r, tree type,
4216 const wide_int &lh_lb,
4217 const wide_int &lh_ub,
4218 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4219 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4221 // For pointer types, we are really only interested in asserting
4222 // whether the expression evaluates to non-NULL.
4223 if (wi_zero_p (type, lh_lb, lh_ub) || wi_zero_p (type, lh_lb, lh_ub))
4224 r = range_zero (type);
4226 r.set_varying (type);
// Range operator for BIT_IOR_EXPR on pointers; only nullness of the
// result is tracked.
class pointer_or_operator : public range_operator
  using range_operator::op1_range;
  using range_operator::op2_range;
  virtual bool op1_range (irange &r, tree type,
			  relation_trio rel = TRIO_VARYING) const;
  virtual bool op2_range (irange &r, tree type,
			  relation_trio rel = TRIO_VARYING) const;
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb, const wide_int &rh_ub) const;
// Derive OP1's range from LHS = OP1 | OP2.  A zero LHS forces both
// operands to be zero; otherwise nothing useful is known.
pointer_or_operator::op1_range (irange &r, tree type,
				const irange &op2 ATTRIBUTE_UNUSED,
				relation_trio) const
  if (lhs.undefined_p ())
      tree zero = build_zero_cst (type);
      r = int_range<1> (zero, zero);
  r.set_varying (type);
// BIT_IOR_EXPR is commutative, so deriving OP2 is identical to
// deriving OP1 with the operands swapped.
pointer_or_operator::op2_range (irange &r, tree type,
				relation_trio) const
  return pointer_or_operator::op1_range (r, type, lhs, op1);
// Fold OR of two pointer ranges into R, tracking nullness only: the
// result is nonzero if either operand is known nonzero, and zero
// only when both operands are exactly zero.
pointer_or_operator::wi_fold (irange &r, tree type,
			      const wide_int &lh_lb,
			      const wide_int &lh_ub,
			      const wide_int &rh_lb,
			      const wide_int &rh_ub) const
  // For pointer types, we are really only interested in asserting
  // whether the expression evaluates to non-NULL.
  if (!wi_includes_zero_p (type, lh_lb, lh_ub)
      && !wi_includes_zero_p (type, rh_lb, rh_ub))
    r = range_nonzero (type);
  else if (wi_zero_p (type, lh_lb, lh_ub) && wi_zero_p (type, rh_lb, rh_ub))
    r = range_zero (type);
  r.set_varying (type);
// Return a pointer to the range_operator instance, if there is one
// associated with tree_code CODE.  Unregistered codes yield NULL
// (entries default to NULL; see range_op_table::set).
range_op_table::operator[] (enum tree_code code)
  gcc_checking_assert (code > 0 && code < MAX_TREE_CODES);
  return m_range_tree[code];
// Add OP to the handler table for CODE.
range_op_table::set (enum tree_code code, range_operator &op)
  // Each tree code may be registered at most once per table.
  gcc_checking_assert (m_range_tree[code] == NULL);
  m_range_tree[code] = &op;
  // Shared operator objects carry ERROR_MARK; dedicated ones must
  // match the code they are registered under.
  gcc_checking_assert (op.m_code == ERROR_MARK || op.m_code == code);
// Shared operators that require separate instantiations because they
// do not share a common tree code.
static operator_cast op_nop, op_convert;
static operator_identity op_ssa, op_paren, op_obj_type;
static operator_unknown op_realpart, op_imagpart;
static pointer_min_max_operator op_ptr_min, op_ptr_max;
// The four division flavors are all operator_div instances;
// presumably each is parameterized by its own division tree code —
// confirm against the operator_div definition.
static operator_div op_trunc_div;
static operator_div op_floor_div;
static operator_div op_round_div;
static operator_div op_ceil_div;
// Instantiate a range op table for integral operations.  A single
// global instance is constructed at startup.
class integral_table : public range_op_table
} integral_tree_table;
// Register the handler for every tree code supported on integral
// types.
integral_table::integral_table ()
  set (EQ_EXPR, op_equal);
  set (NE_EXPR, op_not_equal);
  set (LT_EXPR, op_lt);
  set (LE_EXPR, op_le);
  set (GT_EXPR, op_gt);
  set (GE_EXPR, op_ge);
  set (PLUS_EXPR, op_plus);
  set (MINUS_EXPR, op_minus);
  set (MIN_EXPR, op_min);
  set (MAX_EXPR, op_max);
  set (MULT_EXPR, op_mult);
  set (TRUNC_DIV_EXPR, op_trunc_div);
  set (FLOOR_DIV_EXPR, op_floor_div);
  set (ROUND_DIV_EXPR, op_round_div);
  set (CEIL_DIV_EXPR, op_ceil_div);
  set (EXACT_DIV_EXPR, op_exact_div);
  set (LSHIFT_EXPR, op_lshift);
  set (RSHIFT_EXPR, op_rshift);
  set (NOP_EXPR, op_nop);
  set (CONVERT_EXPR, op_convert);
  set (TRUTH_AND_EXPR, op_logical_and);
  set (BIT_AND_EXPR, op_bitwise_and);
  set (TRUTH_OR_EXPR, op_logical_or);
  set (BIT_IOR_EXPR, op_bitwise_or);
  set (BIT_XOR_EXPR, op_bitwise_xor);
  set (TRUNC_MOD_EXPR, op_trunc_mod);
  set (TRUTH_NOT_EXPR, op_logical_not);
  set (BIT_NOT_EXPR, op_bitwise_not);
  set (INTEGER_CST, op_integer_cst);
  set (SSA_NAME, op_ssa);
  set (PAREN_EXPR, op_paren);
  set (OBJ_TYPE_REF, op_obj_type);
  set (IMAGPART_EXPR, op_imagpart);
  set (REALPART_EXPR, op_realpart);
  set (POINTER_DIFF_EXPR, op_pointer_diff);
  set (ABS_EXPR, op_abs);
  set (ABSU_EXPR, op_absu);
  set (NEGATE_EXPR, op_negate);
  set (ADDR_EXPR, op_addr);
// Instantiate a range op table for pointer operations.  A single
// global instance is constructed at startup.
class pointer_table : public range_op_table
} pointer_tree_table;
// Register handlers for tree codes supported on pointer types.  The
// pointer-specific operators (nullness tracking) come first; the
// remaining codes reuse the shared operator instances that are also
// registered in the integral table.
pointer_table::pointer_table ()
  set (BIT_AND_EXPR, op_pointer_and);
  set (BIT_IOR_EXPR, op_pointer_or);
  set (MIN_EXPR, op_ptr_min);
  set (MAX_EXPR, op_ptr_max);
  set (POINTER_PLUS_EXPR, op_pointer_plus);

  set (EQ_EXPR, op_equal);
  set (NE_EXPR, op_not_equal);
  set (LT_EXPR, op_lt);
  set (LE_EXPR, op_le);
  set (GT_EXPR, op_gt);
  set (GE_EXPR, op_ge);
  set (SSA_NAME, op_ssa);
  set (INTEGER_CST, op_integer_cst);
  set (ADDR_EXPR, op_addr);
  set (NOP_EXPR, op_nop);
  set (CONVERT_EXPR, op_convert);

  set (BIT_NOT_EXPR, op_bitwise_not);
  set (BIT_XOR_EXPR, op_bitwise_xor);
// The tables are hidden and accessed via a simple extern function.

static inline range_operator *
get_handler (enum tree_code code, tree type)
  // First check if there is a pointer specialization.
  if (POINTER_TYPE_P (type))
    return pointer_tree_table[code];
  if (INTEGRAL_TYPE_P (type))
    return integral_tree_table[code];
// Return the floating point operator for CODE or NULL if none available.

static inline range_operator_float *
get_float_handler (enum tree_code code, tree)
  return (*floating_tree_table)[code];
// Bind this handler to CODE for TYPE: types supported by irange use
// the integral/pointer tables, types supported by frange use the
// floating point table.  m_valid records whether a handler exists.
range_op_handler::set_op_handler (tree_code code, tree type)
  if (irange::supports_p (type))
      m_int = get_handler (code, type);
      m_valid = m_int != NULL;
  else if (frange::supports_p (type))
      m_float = get_float_handler (code, type);
      m_valid = m_float != NULL;
// Default constructor; presumably leaves the handler invalid until
// set_op_handler is called — confirm against the body.
range_op_handler::range_op_handler ()
// Construct a handler bound to CODE for TYPE.
range_op_handler::range_op_handler (tree_code code, tree type)
  set_op_handler (code, type);
// Dispatch fold_range to the integral or floating point
// implementation, converting each vrange argument to the concrete
// irange/frange kind the callee expects.
range_op_handler::fold_range (vrange &r, tree type,
			      relation_trio rel) const
  gcc_checking_assert (m_valid);
  return m_int->fold_range (as_a <irange> (r), type,
			    as_a <irange> (rh), rel);
  if (is_a <irange> (r))
    if (is_a <irange> (rh))
      return m_float->fold_range (as_a <irange> (r), type,
				  as_a <irange> (rh), rel);
    return m_float->fold_range (as_a <irange> (r), type,
				as_a <frange> (rh), rel);
  return m_float->fold_range (as_a <frange> (r), type,
			      as_a <frange> (rh), rel);
// Dispatch op1_range to the integral or floating point
// implementation.  An undefined LHS carries no information to
// derive an operand range from.
range_op_handler::op1_range (vrange &r, tree type,
			     relation_trio rel) const
  gcc_checking_assert (m_valid);
  if (lhs.undefined_p ())
  return m_int->op1_range (as_a <irange> (r), type,
			   as_a <irange> (lhs),
			   as_a <irange> (op2), rel);
  if (is_a <irange> (lhs))
    return m_float->op1_range (as_a <frange> (r), type,
			       as_a <irange> (lhs),
			       as_a <frange> (op2), rel);
  return m_float->op1_range (as_a <frange> (r), type,
			     as_a <frange> (lhs),
			     as_a <frange> (op2), rel);
// Dispatch op2_range to the integral or floating point
// implementation; mirrors op1_range with OP1 as the known operand.
range_op_handler::op2_range (vrange &r, tree type,
			     relation_trio rel) const
  gcc_checking_assert (m_valid);
  if (lhs.undefined_p ())
  return m_int->op2_range (as_a <irange> (r), type,
			   as_a <irange> (lhs),
			   as_a <irange> (op1), rel);
  if (is_a <irange> (lhs))
    return m_float->op2_range (as_a <frange> (r), type,
			       as_a <irange> (lhs),
			       as_a <frange> (op1), rel);
  return m_float->op2_range (as_a <frange> (r), type,
			     as_a <frange> (lhs),
			     as_a <frange> (op1), rel);
// Dispatch lhs_op1_relation to the integral or floating point
// implementation based on the range kinds involved.
range_op_handler::lhs_op1_relation (const vrange &lhs,
				    relation_kind rel) const
  gcc_checking_assert (m_valid);
  return m_int->lhs_op1_relation (as_a <irange> (lhs),
				  as_a <irange> (op1),
				  as_a <irange> (op2), rel);
  if (is_a <irange> (lhs))
    return m_float->lhs_op1_relation (as_a <irange> (lhs),
				      as_a <frange> (op1),
				      as_a <frange> (op2), rel);
  return m_float->lhs_op1_relation (as_a <frange> (lhs),
				    as_a <frange> (op1),
				    as_a <frange> (op2), rel);
// Dispatch lhs_op2_relation to the integral or floating point
// implementation; mirrors lhs_op1_relation for the second operand.
range_op_handler::lhs_op2_relation (const vrange &lhs,
				    relation_kind rel) const
  gcc_checking_assert (m_valid);
  return m_int->lhs_op2_relation (as_a <irange> (lhs),
				  as_a <irange> (op1),
				  as_a <irange> (op2), rel);
  if (is_a <irange> (lhs))
    return m_float->lhs_op2_relation (as_a <irange> (lhs),
				      as_a <frange> (op1),
				      as_a <frange> (op2), rel);
  return m_float->lhs_op2_relation (as_a <frange> (lhs),
				    as_a <frange> (op1),
				    as_a <frange> (op2), rel);
// Dispatch op1_op2_relation on the kind of the LHS range.
range_op_handler::op1_op2_relation (const vrange &lhs) const
  gcc_checking_assert (m_valid);
  return m_int->op1_op2_relation (as_a <irange> (lhs));
  if (is_a <irange> (lhs))
    return m_float->op1_op2_relation (as_a <irange> (lhs));
  return m_float->op1_op2_relation (as_a <frange> (lhs));
// Cast the range in R to TYPE.  The conversion is performed by
// folding a CONVERT_EXPR whose operand is a copy of R; if no handler
// exists or folding fails, R becomes VARYING for TYPE.

range_cast (vrange &r, tree type)
  Value_Range tmp (r);
  Value_Range varying (type);
  varying.set_varying (type);
  range_op_handler op (CONVERT_EXPR, type);
  // Call op_convert, if it fails, the result is varying.
  if (!op || !op.fold_range (r, type, tmp, varying))
    r.set_varying (type);
4612 #include "selftest.h"
4616 #define INT(N) build_int_cst (integer_type_node, (N))
4617 #define UINT(N) build_int_cstu (unsigned_type_node, (N))
4618 #define INT16(N) build_int_cst (short_integer_type_node, (N))
4619 #define UINT16(N) build_int_cstu (short_unsigned_type_node, (N))
4620 #define SCHAR(N) build_int_cst (signed_char_type_node, (N))
4621 #define UCHAR(N) build_int_cstu (unsigned_char_type_node, (N))
// Selftests for range_cast: truncation, sign change, widening, and
// round-tripping between signed/unsigned types of various widths.
range_op_cast_tests ()
  int_range<1> r0, r1, r2, rold;
  r0.set_varying (integer_type_node);
  tree maxint = wide_int_to_tree (integer_type_node, r0.upper_bound ());

  // If a range is in any way outside of the range for the converted
  // to range, default to the range for the new type.
  r0.set_varying (short_integer_type_node);
  tree minshort = wide_int_to_tree (short_integer_type_node, r0.lower_bound ());
  tree maxshort = wide_int_to_tree (short_integer_type_node, r0.upper_bound ());
  if (TYPE_PRECISION (TREE_TYPE (maxint))
      > TYPE_PRECISION (short_integer_type_node))
      r1 = int_range<1> (integer_zero_node, maxint);
      range_cast (r1, short_integer_type_node);
      ASSERT_TRUE (r1.lower_bound () == wi::to_wide (minshort)
		   && r1.upper_bound() == wi::to_wide (maxshort));

  // (unsigned char)[-5,-1] => [251,255].
  r0 = rold = int_range<1> (SCHAR (-5), SCHAR (-1));
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == int_range<1> (UCHAR (251), UCHAR (255)));
  range_cast (r0, signed_char_type_node);
  ASSERT_TRUE (r0 == rold);

  // (signed char)[15, 150] => [-128,-106][15,127].
  r0 = rold = int_range<1> (UCHAR (15), UCHAR (150));
  range_cast (r0, signed_char_type_node);
  r1 = int_range<1> (SCHAR (15), SCHAR (127));
  r2 = int_range<1> (SCHAR (-128), SCHAR (-106));
  ASSERT_TRUE (r1 == r0);
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == rold);

  // (unsigned char)[-5, 5] => [0,5][251,255].
  r0 = rold = int_range<1> (SCHAR (-5), SCHAR (5));
  range_cast (r0, unsigned_char_type_node);
  r1 = int_range<1> (UCHAR (251), UCHAR (255));
  r2 = int_range<1> (UCHAR (0), UCHAR (5));
  ASSERT_TRUE (r0 == r1);
  range_cast (r0, signed_char_type_node);
  ASSERT_TRUE (r0 == rold);

  // (unsigned char)[-5,5] => [0,5][251,255].
  r0 = int_range<1> (INT (-5), INT (5));
  range_cast (r0, unsigned_char_type_node);
  r1 = int_range<1> (UCHAR (0), UCHAR (5));
  r1.union_ (int_range<1> (UCHAR (251), UCHAR (255)));
  ASSERT_TRUE (r0 == r1);

  // (unsigned char)[5U,1974U] => [0,255].
  r0 = int_range<1> (UINT (5), UINT (1974));
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == int_range<1> (UCHAR (0), UCHAR (255)));
  range_cast (r0, integer_type_node);
  // Going to a wider range should not sign extend.
  ASSERT_TRUE (r0 == int_range<1> (INT (0), INT (255)));

  // (unsigned char)[-350,15] => [0,255].
  r0 = int_range<1> (INT (-350), INT (15));
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == (int_range<1>
		      (TYPE_MIN_VALUE (unsigned_char_type_node),
		       TYPE_MAX_VALUE (unsigned_char_type_node))));

  // Casting [-120,20] from signed char to unsigned short.
  // => [0, 20][0xff88, 0xffff].
  r0 = int_range<1> (SCHAR (-120), SCHAR (20));
  range_cast (r0, short_unsigned_type_node);
  r1 = int_range<1> (UINT16 (0), UINT16 (20));
  r2 = int_range<1> (UINT16 (0xff88), UINT16 (0xffff));
  ASSERT_TRUE (r0 == r1);
  // A truncating cast back to signed char will work because [-120, 20]
  // is representable in signed char.
  range_cast (r0, signed_char_type_node);
  ASSERT_TRUE (r0 == int_range<1> (SCHAR (-120), SCHAR (20)));

  // unsigned char -> signed short
  // (signed short)[(unsigned char)25, (unsigned char)250]
  // => [(signed short)25, (signed short)250]
  r0 = rold = int_range<1> (UCHAR (25), UCHAR (250));
  range_cast (r0, short_integer_type_node);
  r1 = int_range<1> (INT16 (25), INT16 (250));
  ASSERT_TRUE (r0 == r1);
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == rold);

  // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
  r0 = int_range<1> (TYPE_MIN_VALUE (long_long_integer_type_node),
		     TYPE_MAX_VALUE (long_long_integer_type_node));
  range_cast (r0, short_unsigned_type_node);
  r1 = int_range<1> (TYPE_MIN_VALUE (short_unsigned_type_node),
		     TYPE_MAX_VALUE (short_unsigned_type_node));
  ASSERT_TRUE (r0 == r1);

  // Casting NONZERO to a narrower type will wrap/overflow so
  // it's just the entire range for the narrower type.
  //
  // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32].  This
  // is outside of the range of a smaller range, return the full
  if (TYPE_PRECISION (integer_type_node)
      > TYPE_PRECISION (short_integer_type_node))
      r0 = range_nonzero (integer_type_node);
      range_cast (r0, short_integer_type_node);
      r1 = int_range<1> (TYPE_MIN_VALUE (short_integer_type_node),
			 TYPE_MAX_VALUE (short_integer_type_node));
      ASSERT_TRUE (r0 == r1);

  // Casting NONZERO from a narrower signed to a wider signed.
  //
  // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
  // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
  r0 = range_nonzero (short_integer_type_node);
  range_cast (r0, integer_type_node);
  r1 = int_range<1> (INT (-32768), INT (-1));
  r2 = int_range<1> (INT (1), INT (32767));
  ASSERT_TRUE (r0 == r1);
// Selftests for LSHIFT_EXPR fold_range and op1_range, in both
// unsigned and signed flavors.
range_op_lshift_tests ()
  // Test that 0x808.... & 0x8.... still contains 0x8....
  // for a large set of numbers.
    tree big_type = long_long_unsigned_type_node;
    // big_num = 0x808,0000,0000,0000
    tree big_num = fold_build2 (LSHIFT_EXPR, big_type,
				build_int_cst (big_type, 0x808),
				build_int_cst (big_type, 48));
    op_bitwise_and.fold_range (res, big_type,
			       int_range <1> (big_type),
			       int_range <1> (big_num, big_num));
    // val = 0x8,0000,0000,0000
    tree val = fold_build2 (LSHIFT_EXPR, big_type,
			    build_int_cst (big_type, 0x8),
			    build_int_cst (big_type, 48));
    ASSERT_TRUE (res.contains_p (val));

  if (TYPE_PRECISION (unsigned_type_node) > 31)
      // unsigned VARYING = op1 << 1 should be VARYING.
      int_range<2> lhs (unsigned_type_node);
      int_range<2> shift (INT (1), INT (1));
      op_lshift.op1_range (op1, unsigned_type_node, lhs, shift);
      ASSERT_TRUE (op1.varying_p ());

      // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
      int_range<2> zero (UINT (0), UINT (0));
      op_lshift.op1_range (op1, unsigned_type_node, zero, shift);
      ASSERT_TRUE (op1.num_pairs () == 2);
      // Remove the [0,0] range.
      op1.intersect (zero);
      ASSERT_TRUE (op1.num_pairs () == 1);
      // op1 << 1 should be [0x8000,0x8000] << 1,
      // which should result in [0,0].
      int_range_max result;
      op_lshift.fold_range (result, unsigned_type_node, op1, shift);
      ASSERT_TRUE (result == zero);

  // signed VARYING = op1 << 1 should be VARYING.
  if (TYPE_PRECISION (integer_type_node) > 31)
      // signed VARYING = op1 << 1 should be VARYING.
      int_range<2> lhs (integer_type_node);
      int_range<2> shift (INT (1), INT (1));
      op_lshift.op1_range (op1, integer_type_node, lhs, shift);
      ASSERT_TRUE (op1.varying_p ());

      // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
      int_range<2> zero (INT (0), INT (0));
      op_lshift.op1_range (op1, integer_type_node, zero, shift);
      ASSERT_TRUE (op1.num_pairs () == 2);
      // Remove the [0,0] range.
      op1.intersect (zero);
      ASSERT_TRUE (op1.num_pairs () == 1);
      // op1 << 1 should be [0x8000,0x8000] << 1,
      // which should result in [0,0].
      int_range_max result;
      // NOTE(review): the fold below passes unsigned_type_node even
      // though this is the signed branch — presumably harmless since
      // only the bit pattern matters at equal precision, but confirm
      // whether integer_type_node was intended.
      op_lshift.fold_range (result, unsigned_type_node, op1, shift);
      ASSERT_TRUE (result == zero);
// Selftests for RSHIFT_EXPR op1_range, covering unsigned and signed
// shifts, an impossible LHS, and sign-bit extraction (>> 31).
range_op_rshift_tests ()
  // unsigned: [3, MAX] = OP1 >> 1
    int_range_max lhs (build_int_cst (unsigned_type_node, 3),
		       TYPE_MAX_VALUE (unsigned_type_node));
    int_range_max one (build_one_cst (unsigned_type_node),
		       build_one_cst (unsigned_type_node));
    op_rshift.op1_range (op1, unsigned_type_node, lhs, one);
    ASSERT_FALSE (op1.contains_p (UINT (3)));

  // signed: [3, MAX] = OP1 >> 1
    int_range_max lhs (INT (3), TYPE_MAX_VALUE (integer_type_node));
    int_range_max one (INT (1), INT (1));
    op_rshift.op1_range (op1, integer_type_node, lhs, one);
    ASSERT_FALSE (op1.contains_p (INT (-2)));

  // This is impossible, so OP1 should be [].
  // signed: [MIN, MIN] = OP1 >> 1
    int_range_max lhs (TYPE_MIN_VALUE (integer_type_node),
		       TYPE_MIN_VALUE (integer_type_node));
    int_range_max one (INT (1), INT (1));
    op_rshift.op1_range (op1, integer_type_node, lhs, one);
    ASSERT_TRUE (op1.undefined_p ());

  // signed: ~[-1] = OP1 >> 31
  if (TYPE_PRECISION (integer_type_node) > 31)
      int_range_max lhs (INT (-1), INT (-1), VR_ANTI_RANGE);
      int_range_max shift (INT (31), INT (31));
      op_rshift.op1_range (op1, integer_type_node, lhs, shift);
      // OP1 >> 31 yields -1 exactly for negative OP1, so no
      // negatives may remain in the derived OP1.
      int_range_max negatives = range_negatives (integer_type_node);
      negatives.intersect (op1);
      ASSERT_TRUE (negatives.undefined_p ());
// Selftests for BIT_AND_EXPR op1_range and BIT_IOR_EXPR fold_range,
// including nonzero-bit mask derivation.
range_op_bitwise_and_tests ()
  tree min = vrp_val_min (integer_type_node);
  tree max = vrp_val_max (integer_type_node);
  tree tiny = fold_build2 (PLUS_EXPR, integer_type_node, min,
			   build_one_cst (integer_type_node));
  int_range_max i1 (tiny, max);
  int_range_max i2 (build_int_cst (integer_type_node, 255),
		    build_int_cst (integer_type_node, 255));

  // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
  op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
  ASSERT_TRUE (res == int_range<1> (integer_type_node));

  // VARYING = OP1 & 255: OP1 is VARYING
  i1 = int_range<1> (integer_type_node);
  op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
  ASSERT_TRUE (res == int_range<1> (integer_type_node));

  // For 0 = x & MASK, x is ~MASK.
    int_range<2> zero (integer_zero_node, integer_zero_node);
    int_range<2> mask = int_range<2> (INT (7), INT (7));
    op_bitwise_and.op1_range (res, integer_type_node, zero, mask);
    wide_int inv = wi::shwi (~7U, TYPE_PRECISION (integer_type_node));
    ASSERT_TRUE (res.get_nonzero_bits () == inv);

  // (NONZERO | X) is nonzero.
  i1.set_nonzero (integer_type_node);
  i2.set_varying (integer_type_node);
  op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
  ASSERT_TRUE (res.nonzero_p ());

  // (NEGATIVE | X) is nonzero.
  i1 = int_range<1> (INT (-5), INT (-3));
  i2.set_varying (integer_type_node);
  op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
  ASSERT_FALSE (res.contains_p (INT (0)));
// Selftests for PLUS_EXPR lhs_op1_relation under unsigned (wrapping)
// arithmetic: no wrap => LHS > OP1, possible wrap => VARYING,
// guaranteed wrap => LHS < OP1.
range_relational_tests ()
  int_range<2> lhs (unsigned_char_type_node);
  int_range<2> op1 (UCHAR (8), UCHAR (10));
  int_range<2> op2 (UCHAR (20), UCHAR (20));

  // Never wrapping additions mean LHS > OP1.
  relation_kind code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
  ASSERT_TRUE (code == VREL_GT);

  // Most wrapping additions mean nothing...
  op1 = int_range<2> (UCHAR (8), UCHAR (10));
  op2 = int_range<2> (UCHAR (0), UCHAR (255));
  code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
  ASSERT_TRUE (code == VREL_VARYING);

  // However, always wrapping additions mean LHS < OP1.
  op1 = int_range<2> (UCHAR (1), UCHAR (255));
  op2 = int_range<2> (UCHAR (255), UCHAR (255));
  code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
  ASSERT_TRUE (code == VREL_LT);
  // Run all range operator selftests, including the floating point
  // tests, which are declared extern here and defined in another
  // translation unit.
  range_op_rshift_tests ();
  range_op_lshift_tests ();
  range_op_bitwise_and_tests ();
  range_op_cast_tests ();
  range_relational_tests ();
  extern void range_op_float_tests ();
  range_op_float_tests ();
4948 } // namespace selftest
4950 #endif // CHECKING_P