1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 88, 92-98, 1999, 2000 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
21 /*@@ This file should be rewritten to use an arbitrary precision
22 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
23 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
24 @@ The routines that translate from the ap rep should
25 @@ warn if precision et al. is lost.
26 @@ This would also make life easier when this technology is used
27 @@ for cross-compilers. */
30 /* The entry points in this file are fold, size_int_wide, size_binop
33 fold takes a tree as argument and returns a simplified tree.
35 size_binop takes a tree code for an arithmetic operation
36 and two operands that are trees, and produces a tree for the
37 result, assuming the type comes from `sizetype'.
39 size_int takes an integer value, and creates a tree constant
40 with type from `sizetype'.
42 force_fit_type takes a constant and prior overflow indicator, and
43 forces the value to fit the type. It returns an overflow indicator. */
55 static void encode PROTO((HOST_WIDE_INT *,
56 HOST_WIDE_INT, HOST_WIDE_INT));
57 static void decode PROTO((HOST_WIDE_INT *,
58 HOST_WIDE_INT *, HOST_WIDE_INT *));
59 int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
60 HOST_WIDE_INT, HOST_WIDE_INT,
61 HOST_WIDE_INT, HOST_WIDE_INT *,
62 HOST_WIDE_INT *, HOST_WIDE_INT *,
64 static tree negate_expr PROTO((tree));
65 static tree split_tree PROTO((tree, enum tree_code, tree *, tree *,
67 static tree associate_trees PROTO((tree, tree, enum tree_code, tree));
68 static tree int_const_binop PROTO((enum tree_code, tree, tree, int, int));
69 static void const_binop_1 PROTO((PTR));
70 static tree const_binop PROTO((enum tree_code, tree, tree, int));
71 static void fold_convert_1 PROTO((PTR));
72 static tree fold_convert PROTO((tree, tree));
73 static enum tree_code invert_tree_comparison PROTO((enum tree_code));
74 static enum tree_code swap_tree_comparison PROTO((enum tree_code));
75 static int truth_value_p PROTO((enum tree_code));
76 static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
77 static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
78 static tree eval_subst PROTO((tree, tree, tree, tree, tree));
79 static tree omit_one_operand PROTO((tree, tree, tree));
80 static tree pedantic_omit_one_operand PROTO((tree, tree, tree));
81 static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
82 static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
83 static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
85 static tree decode_field_reference PROTO((tree, int *, int *,
86 enum machine_mode *, int *,
87 int *, tree *, tree *));
88 static int all_ones_mask_p PROTO((tree, int));
89 static int simple_operand_p PROTO((tree));
90 static tree range_binop PROTO((enum tree_code, tree, tree, int,
92 static tree make_range PROTO((tree, int *, tree *, tree *));
93 static tree build_range_check PROTO((tree, tree, int, tree, tree));
94 static int merge_ranges PROTO((int *, tree *, tree *, int, tree, tree,
96 static tree fold_range_test PROTO((tree));
97 static tree unextend PROTO((tree, int, int, tree));
98 static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
99 static tree optimize_minmax_comparison PROTO((tree));
100 static tree extract_muldiv PROTO((tree, tree, enum tree_code, tree));
101 static tree strip_compound_expr PROTO((tree, tree));
102 static int multiple_of_p PROTO((tree, tree, tree));
103 static tree constant_boolean_node PROTO((int, tree));
104 static int count_cond PROTO((tree, int));
107 #define BRANCH_COST 1
110 /* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
111 overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
112 and SUM1. Then this yields nonzero if overflow occurred during the
115 Overflow occurs if A and B have the same sign, but A and SUM differ in
116 sign. Use `^' to test whether signs differ, and `< 0' to isolate the
118 #define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
120 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
121 We do that by representing the two-word integer in 4 words, with only
122 HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
123 number. The value of the word is LOWPART + HIGHPART * BASE. */
126 ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
127 #define HIGHPART(x) \
128 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
129 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
131 /* Unpack a two-word integer into 4 words.
132 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
133 WORDS points to the array of HOST_WIDE_INTs. */
136 encode (words, low, hi)
137 HOST_WIDE_INT *words;
138 HOST_WIDE_INT low, hi;
140 words[0] = LOWPART (low);
141 words[1] = HIGHPART (low);
142 words[2] = LOWPART (hi);
143 words[3] = HIGHPART (hi);
146 /* Pack an array of 4 words into a two-word integer.
147 WORDS points to the array of words.
148 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
151 decode (words, low, hi)
152 HOST_WIDE_INT *words;
153 HOST_WIDE_INT *low, *hi;
155 *low = words[0] + words[1] * BASE;
156 *hi = words[2] + words[3] * BASE;
159 /* Make the integer constant T valid for its type by setting to 0 or 1 all
160 the bits in the constant that don't belong in the type.
162 Return 1 if a signed overflow occurs, 0 otherwise. If OVERFLOW is
163 nonzero, a signed overflow has already occurred in calculating T, so
166 Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
170 force_fit_type (t, overflow)
174 HOST_WIDE_INT low, high;
177 if (TREE_CODE (t) == REAL_CST)
179 #ifdef CHECK_FLOAT_VALUE
180 CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
186 else if (TREE_CODE (t) != INTEGER_CST)
189 low = TREE_INT_CST_LOW (t);
190 high = TREE_INT_CST_HIGH (t);
192 if (POINTER_TYPE_P (TREE_TYPE (t)))
195 prec = TYPE_PRECISION (TREE_TYPE (t));
197 /* First clear all bits that are beyond the type's precision. */
199 if (prec == 2 * HOST_BITS_PER_WIDE_INT)
201 else if (prec > HOST_BITS_PER_WIDE_INT)
202 TREE_INT_CST_HIGH (t)
203 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
206 TREE_INT_CST_HIGH (t) = 0;
207 if (prec < HOST_BITS_PER_WIDE_INT)
208 TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
211 /* Unsigned types do not suffer sign extension or overflow. */
212 if (TREE_UNSIGNED (TREE_TYPE (t)))
215 /* If the value's sign bit is set, extend the sign. */
216 if (prec != 2 * HOST_BITS_PER_WIDE_INT
217 && (prec > HOST_BITS_PER_WIDE_INT
218 ? (TREE_INT_CST_HIGH (t)
219 & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
220 : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
222 /* Value is negative:
223 set to 1 all the bits that are outside this type's precision. */
224 if (prec > HOST_BITS_PER_WIDE_INT)
225 TREE_INT_CST_HIGH (t)
226 |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
229 TREE_INT_CST_HIGH (t) = -1;
230 if (prec < HOST_BITS_PER_WIDE_INT)
231 TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
235 /* Return nonzero if signed overflow occurred. */
237 ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
241 /* Add two doubleword integers with doubleword result.
242 Each argument is given as two `HOST_WIDE_INT' pieces.
243 One argument is L1 and H1; the other, L2 and H2.
244 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
247 add_double (l1, h1, l2, h2, lv, hv)
248 HOST_WIDE_INT l1, h1, l2, h2;
249 HOST_WIDE_INT *lv, *hv;
254 h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);
258 return OVERFLOW_SUM_SIGN (h1, h2, h);
261 /* Negate a doubleword integer with doubleword result.
262 Return nonzero if the operation overflows, assuming it's signed.
263 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
264 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
267 neg_double (l1, h1, lv, hv)
268 HOST_WIDE_INT l1, h1;
269 HOST_WIDE_INT *lv, *hv;
275 return (*hv & h1) < 0;
285 /* Multiply two doubleword integers with doubleword result.
286 Return nonzero if the operation overflows, assuming it's signed.
287 Each argument is given as two `HOST_WIDE_INT' pieces.
288 One argument is L1 and H1; the other, L2 and H2.
289 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
292 mul_double (l1, h1, l2, h2, lv, hv)
293 HOST_WIDE_INT l1, h1, l2, h2;
294 HOST_WIDE_INT *lv, *hv;
296 HOST_WIDE_INT arg1[4];
297 HOST_WIDE_INT arg2[4];
298 HOST_WIDE_INT prod[4 * 2];
299 register unsigned HOST_WIDE_INT carry;
300 register int i, j, k;
301 HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
303 encode (arg1, l1, h1);
304 encode (arg2, l2, h2);
306 bzero ((char *) prod, sizeof prod);
308 for (i = 0; i < 4; i++)
311 for (j = 0; j < 4; j++)
314 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
315 carry += arg1[i] * arg2[j];
316 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
318 prod[k] = LOWPART (carry);
319 carry = HIGHPART (carry);
324 decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */
326 /* Check for overflow by calculating the top half of the answer in full;
327 it should agree with the low half's sign bit. */
328 decode (prod+4, &toplow, &tophigh);
331 neg_double (l2, h2, &neglow, &neghigh);
332 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
336 neg_double (l1, h1, &neglow, &neghigh);
337 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
339 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
342 /* Shift the doubleword integer in L1, H1 left by COUNT places
343 keeping only PREC bits of result.
344 Shift right if COUNT is negative.
345 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
346 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
349 lshift_double (l1, h1, count, prec, lv, hv, arith)
350 HOST_WIDE_INT l1, h1, count;
352 HOST_WIDE_INT *lv, *hv;
357 rshift_double (l1, h1, - count, prec, lv, hv, arith);
361 #ifdef SHIFT_COUNT_TRUNCATED
362 if (SHIFT_COUNT_TRUNCATED)
366 if (count >= 2 * HOST_BITS_PER_WIDE_INT)
368 /* Shifting by the host word size is undefined according to the
369 ANSI standard, so we must handle this as a special case. */
373 else if (count >= HOST_BITS_PER_WIDE_INT)
375 *hv = (unsigned HOST_WIDE_INT) l1 << (count - HOST_BITS_PER_WIDE_INT);
380 *hv = (((unsigned HOST_WIDE_INT) h1 << count)
381 | ((unsigned HOST_WIDE_INT) l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
382 *lv = (unsigned HOST_WIDE_INT) l1 << count;
386 /* Shift the doubleword integer in L1, H1 right by COUNT places
387 keeping only PREC bits of result. COUNT must be positive.
388 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
389 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
392 rshift_double (l1, h1, count, prec, lv, hv, arith)
393 HOST_WIDE_INT l1, h1, count;
394 int prec ATTRIBUTE_UNUSED;
395 HOST_WIDE_INT *lv, *hv;
398 unsigned HOST_WIDE_INT signmask;
400 ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
403 #ifdef SHIFT_COUNT_TRUNCATED
404 if (SHIFT_COUNT_TRUNCATED)
408 if (count >= 2 * HOST_BITS_PER_WIDE_INT)
410 /* Shifting by the host word size is undefined according to the
411 ANSI standard, so we must handle this as a special case. */
415 else if (count >= HOST_BITS_PER_WIDE_INT)
418 *lv = ((signmask << (2 * HOST_BITS_PER_WIDE_INT - count - 1) << 1)
419 | ((unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT)));
423 *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
424 | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
425 *hv = ((signmask << (HOST_BITS_PER_WIDE_INT - count))
426 | ((unsigned HOST_WIDE_INT) h1 >> count));
430 /* Rotate the doubleword integer in L1, H1 left by COUNT places
431 keeping only PREC bits of result.
432 Rotate right if COUNT is negative.
433 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
436 lrotate_double (l1, h1, count, prec, lv, hv)
437 HOST_WIDE_INT l1, h1, count;
439 HOST_WIDE_INT *lv, *hv;
441 HOST_WIDE_INT s1l, s1h, s2l, s2h;
447 lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
448 rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
453 /* Rotate the doubleword integer in L1, H1 left by COUNT places
454 keeping only PREC bits of result. COUNT must be positive.
455 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
458 rrotate_double (l1, h1, count, prec, lv, hv)
459 HOST_WIDE_INT l1, h1, count;
461 HOST_WIDE_INT *lv, *hv;
463 HOST_WIDE_INT s1l, s1h, s2l, s2h;
469 rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
470 lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
475 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
476 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
477 CODE is a tree code for a kind of division, one of
478 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
480 It controls how the quotient is rounded to an integer.
481 Return nonzero if the operation overflows.
482 UNS nonzero says do unsigned division. */
485 div_and_round_double (code, uns,
486 lnum_orig, hnum_orig, lden_orig, hden_orig,
487 lquo, hquo, lrem, hrem)
490 HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
491 HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
492 HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
495 HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
496 HOST_WIDE_INT den[4], quo[4];
498 unsigned HOST_WIDE_INT work;
499 register unsigned HOST_WIDE_INT carry = 0;
500 HOST_WIDE_INT lnum = lnum_orig;
501 HOST_WIDE_INT hnum = hnum_orig;
502 HOST_WIDE_INT lden = lden_orig;
503 HOST_WIDE_INT hden = hden_orig;
506 if ((hden == 0) && (lden == 0))
507 overflow = 1, lden = 1;
509 /* calculate quotient sign and convert operands to unsigned. */
515 /* (minimum integer) / (-1) is the only overflow case. */
516 if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
522 neg_double (lden, hden, &lden, &hden);
526 if (hnum == 0 && hden == 0)
527 { /* single precision */
529 /* This unsigned division rounds toward zero. */
530 *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
535 { /* trivial case: dividend < divisor */
536 /* hden != 0 already checked. */
543 bzero ((char *) quo, sizeof quo);
545 bzero ((char *) num, sizeof num); /* to zero 9th element */
546 bzero ((char *) den, sizeof den);
548 encode (num, lnum, hnum);
549 encode (den, lden, hden);
551 /* Special code for when the divisor < BASE. */
552 if (hden == 0 && lden < (HOST_WIDE_INT) BASE)
554 /* hnum != 0 already checked. */
555 for (i = 4 - 1; i >= 0; i--)
557 work = num[i] + carry * BASE;
558 quo[i] = work / (unsigned HOST_WIDE_INT) lden;
559 carry = work % (unsigned HOST_WIDE_INT) lden;
564 /* Full double precision division,
565 with thanks to Don Knuth's "Seminumerical Algorithms". */
566 int num_hi_sig, den_hi_sig;
567 unsigned HOST_WIDE_INT quo_est, scale;
569 /* Find the highest non-zero divisor digit. */
570 for (i = 4 - 1; ; i--)
576 /* Insure that the first digit of the divisor is at least BASE/2.
577 This is required by the quotient digit estimation algorithm. */
579 scale = BASE / (den[den_hi_sig] + 1);
580 if (scale > 1) { /* scale divisor and dividend */
582 for (i = 0; i <= 4 - 1; i++) {
583 work = (num[i] * scale) + carry;
584 num[i] = LOWPART (work);
585 carry = HIGHPART (work);
588 for (i = 0; i <= 4 - 1; i++) {
589 work = (den[i] * scale) + carry;
590 den[i] = LOWPART (work);
591 carry = HIGHPART (work);
592 if (den[i] != 0) den_hi_sig = i;
599 for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
600 /* guess the next quotient digit, quo_est, by dividing the first
601 two remaining dividend digits by the high order quotient digit.
602 quo_est is never low and is at most 2 high. */
603 unsigned HOST_WIDE_INT tmp;
605 num_hi_sig = i + den_hi_sig + 1;
606 work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
607 if (num[num_hi_sig] != den[den_hi_sig])
608 quo_est = work / den[den_hi_sig];
612 /* refine quo_est so it's usually correct, and at most one high. */
613 tmp = work - quo_est * den[den_hi_sig];
615 && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
618 /* Try QUO_EST as the quotient digit, by multiplying the
619 divisor by QUO_EST and subtracting from the remaining dividend.
620 Keep in mind that QUO_EST is the I - 1st digit. */
623 for (j = 0; j <= den_hi_sig; j++)
625 work = quo_est * den[j] + carry;
626 carry = HIGHPART (work);
627 work = num[i + j] - LOWPART (work);
628 num[i + j] = LOWPART (work);
629 carry += HIGHPART (work) != 0;
632 /* if quo_est was high by one, then num[i] went negative and
633 we need to correct things. */
635 if (num[num_hi_sig] < carry)
638 carry = 0; /* add divisor back in */
639 for (j = 0; j <= den_hi_sig; j++)
641 work = num[i + j] + den[j] + carry;
642 carry = HIGHPART (work);
643 num[i + j] = LOWPART (work);
645 num [num_hi_sig] += carry;
648 /* store the quotient digit. */
653 decode (quo, lquo, hquo);
656 /* if result is negative, make it so. */
658 neg_double (*lquo, *hquo, lquo, hquo);
660 /* compute trial remainder: rem = num - (quo * den) */
661 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
662 neg_double (*lrem, *hrem, lrem, hrem);
663 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
668 case TRUNC_MOD_EXPR: /* round toward zero */
669 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
673 case FLOOR_MOD_EXPR: /* round toward negative infinity */
674 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
677 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
680 else return overflow;
684 case CEIL_MOD_EXPR: /* round toward positive infinity */
685 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
687 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
690 else return overflow;
694 case ROUND_MOD_EXPR: /* round to closest integer */
696 HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
697 HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
699 /* get absolute values */
700 if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
701 if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
703 /* if (2 * abs (lrem) >= abs (lden)) */
704 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
705 labs_rem, habs_rem, <wice, &htwice);
706 if (((unsigned HOST_WIDE_INT) habs_den
707 < (unsigned HOST_WIDE_INT) htwice)
708 || (((unsigned HOST_WIDE_INT) habs_den
709 == (unsigned HOST_WIDE_INT) htwice)
710 && ((HOST_WIDE_INT unsigned) labs_den
711 < (unsigned HOST_WIDE_INT) ltwice)))
715 add_double (*lquo, *hquo,
716 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
719 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
722 else return overflow;
730 /* compute true remainder: rem = num - (quo * den) */
731 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
732 neg_double (*lrem, *hrem, lrem, hrem);
733 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
737 #ifndef REAL_ARITHMETIC
738 /* Effectively truncate a real value to represent the nearest possible value
739 in a narrower mode. The result is actually represented in the same data
740 type as the argument, but its value is usually different.
742 A trap may occur during the FP operations and it is the responsibility
743 of the calling function to have a handler established. */
746 real_value_truncate (mode, arg)
747 enum machine_mode mode;
750 return REAL_VALUE_TRUNCATE (mode, arg);
753 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
755 /* Check for infinity in an IEEE double precision number. */
761 /* The IEEE 64-bit double format. */
766 unsigned exponent : 11;
767 unsigned mantissa1 : 20;
772 unsigned mantissa1 : 20;
773 unsigned exponent : 11;
779 if (u.big_endian.sign == 1)
782 return (u.big_endian.exponent == 2047
783 && u.big_endian.mantissa1 == 0
784 && u.big_endian.mantissa2 == 0);
789 return (u.little_endian.exponent == 2047
790 && u.little_endian.mantissa1 == 0
791 && u.little_endian.mantissa2 == 0);
795 /* Check whether an IEEE double precision number is a NaN. */
801 /* The IEEE 64-bit double format. */
806 unsigned exponent : 11;
807 unsigned mantissa1 : 20;
812 unsigned mantissa1 : 20;
813 unsigned exponent : 11;
819 if (u.big_endian.sign == 1)
822 return (u.big_endian.exponent == 2047
823 && (u.big_endian.mantissa1 != 0
824 || u.big_endian.mantissa2 != 0));
829 return (u.little_endian.exponent == 2047
830 && (u.little_endian.mantissa1 != 0
831 || u.little_endian.mantissa2 != 0));
835 /* Check for a negative IEEE double precision number. */
841 /* The IEEE 64-bit double format. */
846 unsigned exponent : 11;
847 unsigned mantissa1 : 20;
852 unsigned mantissa1 : 20;
853 unsigned exponent : 11;
859 if (u.big_endian.sign == 1)
862 return u.big_endian.sign;
867 return u.little_endian.sign;
870 #else /* Target not IEEE */
872 /* Let's assume other float formats don't have infinity.
873 (This can be overridden by redefining REAL_VALUE_ISINF.) */
882 /* Let's assume other float formats don't have NaNs.
883 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
892 /* Let's assume other float formats don't have minus zero.
893 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
901 #endif /* Target not IEEE */
903 /* Try to change R into its exact multiplicative inverse in machine mode
904 MODE. Return nonzero function value if successful. */
907 exact_real_inverse (mode, r)
908 enum machine_mode mode;
919 /* Usually disable if bounds checks are not reliable. */
920 if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT) && !flag_pretend_float)
923 /* Set array index to the less significant bits in the unions, depending
924 on the endian-ness of the host doubles.
925 Disable if insufficient information on the data structure. */
926 #if HOST_FLOAT_FORMAT == UNKNOWN_FLOAT_FORMAT
929 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
932 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
935 #define K (2 * HOST_FLOAT_WORDS_BIG_ENDIAN)
940 if (setjmp (float_error))
942 /* Don't do the optimization if there was an arithmetic error. */
944 set_float_handler (NULL_PTR);
947 set_float_handler (float_error);
949 /* Domain check the argument. */
955 if (REAL_VALUE_ISINF (x.d) || REAL_VALUE_ISNAN (x.d))
959 /* Compute the reciprocal and check for numerical exactness.
960 It is unnecessary to check all the significand bits to determine
961 whether X is a power of 2. If X is not, then it is impossible for
962 the bottom half significand of both X and 1/X to be all zero bits.
963 Hence we ignore the data structure of the top half and examine only
964 the low order bits of the two significands. */
966 if (x.i[K] != 0 || x.i[K + 1] != 0 || t.i[K] != 0 || t.i[K + 1] != 0)
969 /* Truncate to the required mode and range-check the result. */
970 y.d = REAL_VALUE_TRUNCATE (mode, t.d);
971 #ifdef CHECK_FLOAT_VALUE
973 if (CHECK_FLOAT_VALUE (mode, y.d, i))
977 /* Fail if truncation changed the value. */
978 if (y.d != t.d || y.d == 0.0)
982 if (REAL_VALUE_ISINF (y.d) || REAL_VALUE_ISNAN (y.d))
986 /* Output the reciprocal and return success flag. */
987 set_float_handler (NULL_PTR);
992 /* Convert C9X hexadecimal floating point string constant S. Return
993 real value type in mode MODE. This function uses the host computer's
994 floating point arithmetic when there is no REAL_ARITHMETIC. */
997 real_hex_to_f (s, mode)
999 enum machine_mode mode;
1003 unsigned HOST_WIDE_INT low, high;
1004 int shcount, nrmcount, k;
1005 int sign, expsign, isfloat;
1006 int lost = 0;/* Nonzero low order bits shifted out and discarded. */
1007 int frexpon = 0; /* Bits after the decimal point. */
1008 int expon = 0; /* Value of exponent. */
1009 int decpt = 0; /* How many decimal points. */
1010 int gotp = 0; /* How many P's. */
1017 while (*p == ' ' || *p == '\t')
1020 /* Sign, if any, comes first. */
1028 /* The string is supposed to start with 0x or 0X . */
1032 if (*p == 'x' || *p == 'X')
1046 while ((c = *p) != '\0')
1048 if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F')
1049 || (c >= 'a' && c <= 'f'))
1059 if ((high & 0xf0000000) == 0)
1061 high = (high << 4) + ((low >> 28) & 15);
1062 low = (low << 4) + k;
1069 /* Record nonzero lost bits. */
1082 else if (c == 'p' || c == 'P')
1086 /* Sign of exponent. */
1093 /* Value of exponent.
1094 The exponent field is a decimal integer. */
1097 k = (*p++ & 0x7f) - '0';
1098 expon = 10 * expon + k;
1102 /* F suffix is ambiguous in the significand part
1103 so it must appear after the decimal exponent field. */
1104 if (*p == 'f' || *p == 'F')
1112 else if (c == 'l' || c == 'L')
1121 /* Abort if last character read was not legitimate. */
1123 if ((c != '\0' && c != ' ' && c != '\n' && c != '\r') || (decpt > 1))
1126 /* There must be either one decimal point or one p. */
1127 if (decpt == 0 && gotp == 0)
1131 if (high == 0 && low == 0)
1143 /* Leave a high guard bit for carry-out. */
1144 if ((high & 0x80000000) != 0)
1147 low = (low >> 1) | (high << 31);
1152 if ((high & 0xffff8000) == 0)
1154 high = (high << 16) + ((low >> 16) & 0xffff);
1159 while ((high & 0xc0000000) == 0)
1161 high = (high << 1) + ((low >> 31) & 1);
1166 if (isfloat || GET_MODE_SIZE(mode) == UNITS_PER_WORD)
1168 /* Keep 24 bits precision, bits 0x7fffff80.
1169 Rounding bit is 0x40. */
1170 lost = lost | low | (high & 0x3f);
1174 if ((high & 0x80) || lost)
1181 /* We need real.c to do long double formats, so here default
1182 to double precision. */
1183 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1185 Keep 53 bits precision, bits 0x7fffffff fffffc00.
1186 Rounding bit is low word 0x200. */
1187 lost = lost | (low & 0x1ff);
1190 if ((low & 0x400) || lost)
1192 low = (low + 0x200) & 0xfffffc00;
1199 /* Assume it's a VAX with 56-bit significand,
1200 bits 0x7fffffff ffffff80. */
1201 lost = lost | (low & 0x7f);
1204 if ((low & 0x80) || lost)
1206 low = (low + 0x40) & 0xffffff80;
1216 ip = REAL_VALUE_LDEXP (ip, 32) + (double) low;
1217 /* Apply shifts and exponent value as power of 2. */
1218 ip = REAL_VALUE_LDEXP (ip, expon - (nrmcount + frexpon));
1225 #endif /* no REAL_ARITHMETIC */
1227 /* Given T, an expression, return the negation of T. Allow for T to be
1228 null, in which case return null. */
1240 type = TREE_TYPE (t);
1241 STRIP_SIGN_NOPS (t);
1243 switch (TREE_CODE (t))
1247 if (! TREE_UNSIGNED (type)
1248 && 0 != (tem = fold (build1 (NEGATE_EXPR, type, t)))
1249 && ! TREE_OVERFLOW (tem))
1254 return convert (type, TREE_OPERAND (t, 0));
1257 /* - (A - B) -> B - A */
1258 if (! FLOAT_TYPE_P (type) || flag_fast_math)
1259 return convert (type,
1260 fold (build (MINUS_EXPR, TREE_TYPE (t),
1261 TREE_OPERAND (t, 1),
1262 TREE_OPERAND (t, 0))));
1269 return convert (type, build1 (NEGATE_EXPR, TREE_TYPE (t), t));
1272 /* Split a tree IN into a constant, literal and variable parts that could be
1273 combined with CODE to make IN. "constant" means an expression with
1274 TREE_CONSTANT but that isn't an actual constant. CODE must be a
1275 commutative arithmetic operation. Store the constant part into *CONP,
1276 the literal in &LITP and return the variable part. If a part isn't
1277 present, set it to null. If the tree does not decompose in this way,
1278 return the entire tree as the variable part and the other parts as null.
1280 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR. In that
1281 case, we negate an operand that was subtracted. If NEGATE_P is true, we
1282 are negating all of IN.
1284 If IN is itself a literal or constant, return it as appropriate.
1286 Note that we do not guarantee that any of the three values will be the
1287 same type as IN, but they will have the same signedness and mode. */
1290 split_tree (in, code, conp, litp, negate_p)
1292 enum tree_code code;
1301 /* Strip any conversions that don't change the machine mode or signedness. */
1302 STRIP_SIGN_NOPS (in);
1304 if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST)
1306 else if (TREE_CONSTANT (in))
1309 else if (TREE_CODE (in) == code
1310 || (! FLOAT_TYPE_P (TREE_TYPE (in))
1311 /* We can associate addition and subtraction together (even
1312 though the C standard doesn't say so) for integers because
1313 the value is not affected. For reals, the value might be
1314 affected, so we can't. */
1315 && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
1316 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
1318 tree op0 = TREE_OPERAND (in, 0);
1319 tree op1 = TREE_OPERAND (in, 1);
1320 int neg1_p = TREE_CODE (in) == MINUS_EXPR;
1321 int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0;
1323 /* First see if either of the operands is a literal, then a constant. */
1324 if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST)
1325 *litp = op0, op0 = 0;
1326 else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST)
1327 *litp = op1, neg_litp_p = neg1_p, op1 = 0;
1329 if (op0 != 0 && TREE_CONSTANT (op0))
1330 *conp = op0, op0 = 0;
1331 else if (op1 != 0 && TREE_CONSTANT (op1))
1332 *conp = op1, neg_conp_p = neg1_p, op1 = 0;
1334 /* If we haven't dealt with either operand, this is not a case we can
1335 decompose. Otherwise, VAR is either of the ones remaining, if any. */
1336 if (op0 != 0 && op1 != 0)
1341 var = op1, neg_var_p = neg1_p;
1343 /* Now do any needed negations. */
1344 if (neg_litp_p) *litp = negate_expr (*litp);
1345 if (neg_conp_p) *conp = negate_expr (*conp);
1346 if (neg_var_p) var = negate_expr (var);
1353 var = negate_expr (var);
1354 *conp = negate_expr (*conp);
1355 *litp = negate_expr (*litp);
1361 /* Re-associate trees split by the above function. T1 and T2 are either
1362 expressions to associate or null. Return the new expression, if any. If
1363 we build an operation, do it in TYPE and with CODE, except if CODE is a
1364 MINUS_EXPR, in which case we use PLUS_EXPR since split_tree will already
1365 have taken care of the negations. */
1368 associate_trees (t1, t2, code, type)
1370 enum tree_code code;
1378 if (code == MINUS_EXPR)
1381 /* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't
1382 try to fold this since we will have infinite recursion. But do
1383 deal with any NEGATE_EXPRs. */
1384 if (TREE_CODE (t1) == code || TREE_CODE (t2) == code
1385 || TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR)
1387 if (TREE_CODE (t1) == NEGATE_EXPR)
1388 return build (MINUS_EXPR, type, convert (type, t2),
1389 convert (type, TREE_OPERAND (t1, 0)));
1390 else if (TREE_CODE (t2) == NEGATE_EXPR)
1391 return build (MINUS_EXPR, type, convert (type, t1),
1392 convert (type, TREE_OPERAND (t2, 0)));
1394 return build (code, type, convert (type, t1), convert (type, t2));
1397 return fold (build (code, type, convert (type, t1), convert (type, t2)));
1400 /* Combine two integer constants ARG1 and ARG2 under operation CODE
1401 to produce a new constant.
1403 If NOTRUNC is nonzero, do not truncate the result to fit the data type.
1404 If FORSIZE is nonzero, compute overflow for unsigned types. */
1407 int_const_binop (code, arg1, arg2, notrunc, forsize)
1408 enum tree_code code;
1409 register tree arg1, arg2;
1410 int notrunc, forsize;
1412 HOST_WIDE_INT int1l, int1h, int2l, int2h;
1413 HOST_WIDE_INT low, hi;
1414 HOST_WIDE_INT garbagel, garbageh;
1416 int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
1418 int no_overflow = 0;
/* Split each constant into its low and high HOST_WIDE_INT words.  */
1420 int1l = TREE_INT_CST_LOW (arg1);
1421 int1h = TREE_INT_CST_HIGH (arg1);
1422 int2l = TREE_INT_CST_LOW (arg2);
1423 int2h = TREE_INT_CST_HIGH (arg2);
/* Bitwise operations act on each word independently and can't
   overflow.  */
1428 low = int1l | int2l, hi = int1h | int2h;
1432 low = int1l ^ int2l, hi = int1h ^ int2h;
1436 low = int1l & int2l, hi = int1h & int2h;
1439 case BIT_ANDTC_EXPR:
1440 low = int1l & ~int2l, hi = int1h & ~int2h;
1446 /* It's unclear from the C standard whether shifts can overflow.
1447 The following code ignores overflow; perhaps a C standard
1448 interpretation ruling is needed. */
1449 lshift_double (int1l, int1h, int2l,
1450 TYPE_PRECISION (TREE_TYPE (arg1)),
1459 lrotate_double (int1l, int1h, int2l,
1460 TYPE_PRECISION (TREE_TYPE (arg1)),
1465 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
/* Subtraction is negation plus addition; the sign of the result is
   checked against the operand signs to detect overflow.  */
1469 neg_double (int2l, int2h, &low, &hi);
1470 add_double (int1l, int1h, low, hi, &low, &hi);
1471 overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
1475 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1478 case TRUNC_DIV_EXPR:
1479 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1480 case EXACT_DIV_EXPR:
1481 /* This is a shortcut for a common special case. */
1482 if (int2h == 0 && int2l > 0
1483 && ! TREE_CONSTANT_OVERFLOW (arg1)
1484 && ! TREE_CONSTANT_OVERFLOW (arg2)
1485 && int1h == 0 && int1l >= 0)
1487 if (code == CEIL_DIV_EXPR)
1489 low = int1l / int2l, hi = 0;
1493 /* ... fall through ... */
1495 case ROUND_DIV_EXPR:
/* Dividing by one, or a nonzero value by itself, needs no real
   division.  */
1496 if (int2h == 0 && int2l == 1)
1498 low = int1l, hi = int1h;
1501 if (int1l == int2l && int1h == int2h
1502 && ! (int1l == 0 && int1h == 0))
1507 overflow = div_and_round_double (code, uns,
1508 int1l, int1h, int2l, int2h,
1509 &low, &hi, &garbagel, &garbageh);
1512 case TRUNC_MOD_EXPR:
1513 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1514 /* This is a shortcut for a common special case. */
1515 if (int2h == 0 && int2l > 0
1516 && ! TREE_CONSTANT_OVERFLOW (arg1)
1517 && ! TREE_CONSTANT_OVERFLOW (arg2)
1518 && int1h == 0 && int1l >= 0)
1520 if (code == CEIL_MOD_EXPR)
1522 low = int1l % int2l, hi = 0;
1526 /* ... fall through ... */
1528 case ROUND_MOD_EXPR:
/* For MOD we keep the remainder words and discard the quotient
   (the "garbage" outputs) -- the reverse of the DIV cases above.  */
1529 overflow = div_and_round_double (code, uns,
1530 int1l, int1h, int2l, int2h,
1531 &garbagel, &garbageh, &low, &hi);
/* MIN/MAX: two-word comparison, unsigned or signed per the type;
   LOW temporarily holds the "arg1 < arg2" flag.  */
1537 low = (((unsigned HOST_WIDE_INT) int1h
1538 < (unsigned HOST_WIDE_INT) int2h)
1539 || (((unsigned HOST_WIDE_INT) int1h
1540 == (unsigned HOST_WIDE_INT) int2h)
1541 && ((unsigned HOST_WIDE_INT) int1l
1542 < (unsigned HOST_WIDE_INT) int2l)));
1544 low = ((int1h < int2h)
1545 || ((int1h == int2h)
1546 && ((unsigned HOST_WIDE_INT) int1l
1547 < (unsigned HOST_WIDE_INT) int2l)));
1549 if (low == (code == MIN_EXPR))
1550 low = int1l, hi = int1h;
1552 low = int2l, hi = int2h;
/* Small, non-overflowed sizetype results can reuse the cached
   size_int nodes instead of building a fresh node.  */
1559 if (TREE_TYPE (arg1) == sizetype && hi == 0
1561 && (TYPE_MAX_VALUE (sizetype) == NULL
1562 || low <= TREE_INT_CST_LOW (TYPE_MAX_VALUE (sizetype)))
1564 && ! TREE_OVERFLOW (arg1) && ! TREE_OVERFLOW (arg2))
1568 t = build_int_2 (low, hi);
1569 TREE_TYPE (t) = TREE_TYPE (arg1);
/* Record overflow: from the arithmetic above, from forcing the value
   to fit the type (unless NOTRUNC), and from either input.  */
1573 = ((notrunc ? (!uns || forsize) && overflow
1574 : force_fit_type (t, (!uns || forsize) && overflow) && ! no_overflow)
1575 | TREE_OVERFLOW (arg1)
1576 | TREE_OVERFLOW (arg2));
1578 /* If we're doing a size calculation, unsigned arithmetic does overflow.
1579 So check if force_fit_type truncated the value. */
1581 && ! TREE_OVERFLOW (t)
1582 && (TREE_INT_CST_HIGH (t) != hi
1583 || TREE_INT_CST_LOW (t) != low))
1584 TREE_OVERFLOW (t) = 1;
1586 TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
1587 | TREE_CONSTANT_OVERFLOW (arg1)
1588 | TREE_CONSTANT_OVERFLOW (arg2));
1592 /* Define input and output argument for const_binop_1. */
1595 enum tree_code code; /* Input: tree code for operation. */
1596 tree type; /* Input: tree type for operation. */
1597 REAL_VALUE_TYPE d1, d2; /* Input: floating point operands. */
1598 tree t; /* Output: constant for result. */
1601 /* Do the real arithmetic for const_binop while protected by a
1602 float overflow handler. DATA points to a struct cb_args carrying
1603 the operands and receiving the result. */
1605 const_binop_1 (data)
1608 struct cb_args *args = (struct cb_args *) data;
1609 REAL_VALUE_TYPE value;
1611 #ifdef REAL_ARITHMETIC
/* Target-independent (emulated) floating-point arithmetic.  */
1612 REAL_ARITHMETIC (value, args->code, args->d1, args->d2);
/* Without REAL_ARITHMETIC, compute directly in host floating point.  */
1617 value = args->d1 + args->d2;
1621 value = args->d1 - args->d2;
1625 value = args->d1 * args->d2;
1629 #ifndef REAL_INFINITY
1634 value = args->d1 / args->d2;
1638 value = MIN (args->d1, args->d2);
1642 value = MAX (args->d1, args->d2);
1648 #endif /* no REAL_ARITHMETIC */
/* Truncate to the precision of the result mode and build a REAL_CST.  */
1651 = build_real (args->type,
1652 real_value_truncate (TYPE_MODE (args->type), value));
1655 /* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
1656 constant. We assume ARG1 and ARG2 have the same data type, or at least
1657 are the same kind of constant and the same machine mode.
1659 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1662 const_binop (code, arg1, arg2, notrunc)
1663 enum tree_code code;
1664 register tree arg1, arg2;
/* Dispatch on the kind of constant: integer, real, or complex.  */
1667 STRIP_NOPS (arg1); STRIP_NOPS (arg2);
1669 if (TREE_CODE (arg1) == INTEGER_CST)
1670 return int_const_binop (code, arg1, arg2, notrunc, 0);
1672 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1673 if (TREE_CODE (arg1) == REAL_CST)
1679 struct cb_args args;
1681 d1 = TREE_REAL_CST (arg1);
1682 d2 = TREE_REAL_CST (arg2);
1684 /* If either operand is a NaN, just return it. Otherwise, set up
1685 for floating-point trap; we return an overflow. */
1686 if (REAL_VALUE_ISNAN (d1))
1688 else if (REAL_VALUE_ISNAN (d2))
1691 /* Setup input for const_binop_1() */
1692 args.type = TREE_TYPE (arg1);
/* Run the arithmetic under the float-trap handler; a zero return
   means a floating-point exception occurred.  */
1697 if (do_float_handler (const_binop_1, (PTR) &args))
1698 /* Receive output from const_binop_1. */
1702 /* We got an exception from const_binop_1. */
1703 t = copy_node (arg1);
1708 = (force_fit_type (t, overflow)
1709 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
1710 TREE_CONSTANT_OVERFLOW (t)
1712 | TREE_CONSTANT_OVERFLOW (arg1)
1713 | TREE_CONSTANT_OVERFLOW (arg2);
1716 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
/* Complex constants: recurse componentwise on real/imaginary parts.  */
1717 if (TREE_CODE (arg1) == COMPLEX_CST)
1719 register tree type = TREE_TYPE (arg1);
1720 register tree r1 = TREE_REALPART (arg1);
1721 register tree i1 = TREE_IMAGPART (arg1);
1722 register tree r2 = TREE_REALPART (arg2);
1723 register tree i2 = TREE_IMAGPART (arg2);
1729 t = build_complex (type,
1730 const_binop (PLUS_EXPR, r1, r2, notrunc),
1731 const_binop (PLUS_EXPR, i1, i2, notrunc));
1735 t = build_complex (type,
1736 const_binop (MINUS_EXPR, r1, r2, notrunc),
1737 const_binop (MINUS_EXPR, i1, i2, notrunc));
/* Complex multiply: (r1*r2 - i1*i2) + (r1*i2 + i1*r2)i.  */
1741 t = build_complex (type,
1742 const_binop (MINUS_EXPR,
1743 const_binop (MULT_EXPR,
1745 const_binop (MULT_EXPR,
1748 const_binop (PLUS_EXPR,
1749 const_binop (MULT_EXPR,
1751 const_binop (MULT_EXPR,
/* Complex divide via the conjugate: divide each part of
   (a+bi)(c-di) by |c+di|^2 = r2*r2 + i2*i2.  */
1758 register tree magsquared
1759 = const_binop (PLUS_EXPR,
1760 const_binop (MULT_EXPR, r2, r2, notrunc),
1761 const_binop (MULT_EXPR, i2, i2, notrunc),
1764 t = build_complex (type,
1766 (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1767 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1768 const_binop (PLUS_EXPR,
1769 const_binop (MULT_EXPR, r1, r2,
1771 const_binop (MULT_EXPR, i1, i2,
1774 magsquared, notrunc),
1776 (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1777 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1778 const_binop (MINUS_EXPR,
1779 const_binop (MULT_EXPR, i1, r2,
1781 const_binop (MULT_EXPR, r1, i2,
1784 magsquared, notrunc));
1796 /* Return an INTEGER_CST with value whose HOST_BITS_PER_WIDE_INT bits are
1797 given by HIGH and whose HOST_BITS_PER_WIDE_INT bits are given by NUMBER.
1799 If BIT_P is nonzero, this represents a size in bits and the type of the
1800 result will be bitsizetype, otherwise it represents a size in bytes and
1801 the type of the result will be sizetype. */
1804 size_int_wide (number, high, bit_p)
1805 unsigned HOST_WIDE_INT number, high;
1808 /* Type-size nodes already made for small sizes. */
1809 static tree size_table[2 * HOST_BITS_PER_WIDE_INT + 1][2];
1810 static int init_p = 0;
/* Register the static cache as a GC root exactly once.  */
1813 if (ggc_p && ! init_p)
1815 ggc_add_tree_root ((tree *) size_table,
1816 sizeof size_table / sizeof (tree));
/* Reuse a cached node for small values, indexed by value and BIT_P.  */
1820 if (number < 2*HOST_BITS_PER_WIDE_INT + 1 && high == 0
1821 && size_table[number][bit_p] != 0)
1822 return size_table[number][bit_p];
1824 if (number < 2*HOST_BITS_PER_WIDE_INT + 1 && high == 0)
1828 /* Make this a permanent node. */
1829 push_obstacks_nochange ();
1830 end_temporary_allocation ();
1833 t = build_int_2 (number, 0);
1834 TREE_TYPE (t) = bit_p ? bitsizetype : sizetype;
1835 size_table[number][bit_p] = t;
/* Values too big for the cache (or with a nonzero high word) are
   built fresh each call.  */
1843 t = build_int_2 (number, high);
1844 TREE_TYPE (t) = bit_p ? bitsizetype : sizetype;
1845 TREE_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (t) = force_fit_type (t, 0);
1849 /* Combine operands OP1 and OP2 with arithmetic operation CODE.
1850 CODE is a tree code. Data type is taken from `sizetype',
1851 If the operands are constant, so is the result. */
1854 size_binop (code, arg0, arg1)
1855 enum tree_code code;
1858 /* Handle the special case of two integer constants faster. */
1859 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1861 /* And some specific cases even faster than that. */
/* Identities: 0 + x, x + 0, x - 0, 1 * x need no arithmetic.  */
1862 if (code == PLUS_EXPR && integer_zerop (arg0))
1864 else if ((code == MINUS_EXPR || code == PLUS_EXPR)
1865 && integer_zerop (arg1))
1867 else if (code == MULT_EXPR && integer_onep (arg0))
1870 /* Handle general case of two integer constants. */
/* FORSIZE is 1: sizes are unsigned, so overflow must be tracked
   (see int_const_binop).  */
1871 return int_const_binop (code, arg0, arg1, 0, 1);
1874 if (arg0 == error_mark_node || arg1 == error_mark_node)
1875 return error_mark_node;
/* Non-constant operands: build the expression and let fold try.  */
1877 return fold (build (code, sizetype, arg0, arg1));
1880 /* Combine operands OP1 and OP2 with arithmetic operation CODE.
1881 CODE is a tree code. Data type is taken from `ssizetype',
1882 If the operands are constant, so is the result. */
1885 ssize_binop (code, arg0, arg1)
1886 enum tree_code code;
1889 /* Handle the special case of two integer constants faster. */
1890 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1892 /* And some specific cases even faster than that. */
/* Identities: 0 + x, x + 0, x - 0, 1 * x need no arithmetic.  */
1893 if (code == PLUS_EXPR && integer_zerop (arg0))
1895 else if ((code == MINUS_EXPR || code == PLUS_EXPR)
1896 && integer_zerop (arg1))
1898 else if (code == MULT_EXPR && integer_onep (arg0))
1901 /* Handle general case of two integer constants. We convert
1902 arg0 to ssizetype because int_const_binop uses its type for the
1904 arg0 = convert (ssizetype, arg0);
/* FORSIZE is 0: ssizetype is signed, so unsigned-overflow tracking
   does not apply (contrast with size_binop).  */
1905 return int_const_binop (code, arg0, arg1, 0, 0);
1908 if (arg0 == error_mark_node || arg1 == error_mark_node)
1909 return error_mark_node;
/* Non-constant operands: build the expression and let fold try.  */
1911 return fold (build (code, ssizetype, arg0, arg1));
1914 /* This structure is used to communicate arguments to fold_convert_1. */
1917 tree arg1; /* Input: value to convert. */
1918 tree type; /* Input: type to convert value to. */
1919 tree t; /* Output: result of conversion. */
1922 /* Function to convert floating-point constants, protected by floating
1923 point exception handler. DATA points to a struct fc_args. */
1926 fold_convert_1 (data)
1929 struct fc_args * args = (struct fc_args *) data;
/* Truncate the REAL_CST to the target mode's precision and store the
   resulting node in args->t.  */
1931 args->t = build_real (args->type,
1932 real_value_truncate (TYPE_MODE (args->type),
1933 TREE_REAL_CST (args->arg1)));
1936 /* Given T, a tree representing type conversion of ARG1, a constant,
1937 return a constant tree representing the result of conversion. */
1940 fold_convert (t, arg1)
1944 register tree type = TREE_TYPE (t);
/* Converting to an integer or pointer type.  */
1947 if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))
1949 if (TREE_CODE (arg1) == INTEGER_CST)
1951 /* If we would build a constant wider than GCC supports,
1952 leave the conversion unfolded. */
1953 if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
1956 /* Given an integer constant, make new constant with new type,
1957 appropriately sign-extended or truncated. */
1958 t = build_int_2 (TREE_INT_CST_LOW (arg1),
1959 TREE_INT_CST_HIGH (arg1))
1960 TREE_TYPE (t) = type;
1961 /* Indicate an overflow if (1) ARG1 already overflowed,
1962 or (2) force_fit_type indicates an overflow.
1963 Tell force_fit_type that an overflow has already occurred
1964 if ARG1 is a too-large unsigned value and T is signed.
1965 But don't indicate an overflow if converting a pointer. */
1967 = ((force_fit_type (t,
1968 (TREE_INT_CST_HIGH (arg1) < 0
1969 && (TREE_UNSIGNED (type)
1970 < TREE_UNSIGNED (TREE_TYPE (arg1)))))
1971 && ! POINTER_TYPE_P (TREE_TYPE (arg1)))
1972 || TREE_OVERFLOW (arg1));
1973 TREE_CONSTANT_OVERFLOW (t)
1974 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1976 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Real constant -> integer type: range-check, then truncate.  */
1977 else if (TREE_CODE (arg1) == REAL_CST)
1979 /* Don't initialize these, use assignments.
1980 Initialized local aggregates don't work on old compilers. */
1984 tree type1 = TREE_TYPE (arg1);
1987 x = TREE_REAL_CST (arg1);
1988 l = real_value_from_int_cst (type1, TYPE_MIN_VALUE (type));
1990 no_upper_bound = (TYPE_MAX_VALUE (type) == NULL);
1991 if (!no_upper_bound)
1992 u = real_value_from_int_cst (type1, TYPE_MAX_VALUE (type));
1994 /* See if X will be in range after truncation towards 0.
1995 To compensate for truncation, move the bounds away from 0,
1996 but reject if X exactly equals the adjusted bounds. */
1997 #ifdef REAL_ARITHMETIC
1998 REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
1999 if (!no_upper_bound)
2000 REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
2003 if (!no_upper_bound)
2006 /* If X is a NaN, use zero instead and show we have an overflow.
2007 Otherwise, range check. */
2008 if (REAL_VALUE_ISNAN (x))
2009 overflow = 1, x = dconst0;
2010 else if (! (REAL_VALUES_LESS (l, x)
2012 && REAL_VALUES_LESS (x, u)))
2015 #ifndef REAL_ARITHMETIC
/* Host-float path: split X into high and low words by repeated
   division by 2^(HOST_BITS_PER_WIDE_INT/2).  */
2017 HOST_WIDE_INT low, high;
2018 HOST_WIDE_INT half_word
2019 = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
2024 high = (HOST_WIDE_INT) (x / half_word / half_word);
2025 x -= (REAL_VALUE_TYPE) high * half_word * half_word;
2026 if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
2028 low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
2029 low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
2032 low = (HOST_WIDE_INT) x;
2033 if (TREE_REAL_CST (arg1) < 0)
2034 neg_double (low, high, &low, &high);
2035 t = build_int_2 (low, high);
/* Emulated-float path: one macro does the whole conversion.  */
2039 HOST_WIDE_INT low, high;
2040 REAL_VALUE_TO_INT (&low, &high, x);
2041 t = build_int_2 (low, high);
2044 TREE_TYPE (t) = type;
2046 = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
2047 TREE_CONSTANT_OVERFLOW (t)
2048 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
2050 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2051 TREE_TYPE (t) = type;
/* Converting to a real type.  */
2053 else if (TREE_CODE (type) == REAL_TYPE)
2055 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2056 if (TREE_CODE (arg1) == INTEGER_CST)
2057 return build_real_from_int_cst (type, arg1);
2058 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2059 if (TREE_CODE (arg1) == REAL_CST)
2061 struct fc_args args;
/* A NaN keeps its payload; just retag its type.  */
2063 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
2066 TREE_TYPE (arg1) = type;
2070 /* Setup input for fold_convert_1() */
/* Run the truncation under the float-trap handler; a zero return
   means an exception occurred.  */
2074 if (do_float_handler (fold_convert_1, (PTR) &args))
2076 /* Receive output from fold_convert_1() */
2081 /* We got an exception from fold_convert_1() */
2083 t = copy_node (arg1);
2087 = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
2088 TREE_CONSTANT_OVERFLOW (t)
2089 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
2093 TREE_CONSTANT (t) = 1;
2097 /* Return an expr equal to X but certainly not valid as an lvalue. */
2105 /* These things are certainly not lvalues. */
2106 if (TREE_CODE (x) == NON_LVALUE_EXPR
2107 || TREE_CODE (x) == INTEGER_CST
2108 || TREE_CODE (x) == REAL_CST
2109 || TREE_CODE (x) == STRING_CST
2110 || TREE_CODE (x) == ADDR_EXPR)
/* Otherwise wrap X in a NON_LVALUE_EXPR, preserving constness.  */
2113 result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
2114 TREE_CONSTANT (result) = TREE_CONSTANT (x);
2118 /* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
2119 Zero means allow extended lvalues. */
2121 int pedantic_lvalues;
2123 /* When pedantic, return an expr equal to X but certainly not valid as a
2124 pedantic lvalue. Otherwise, return X. */
2127 pedantic_non_lvalue (x)
/* Only strip lvalue-ness when the pedantic-lvalues flag is set.  */
2130 if (pedantic_lvalues)
2131 return non_lvalue (x);
2136 /* Given a tree comparison code, return the code that is the logical inverse
2137 of the given code. It is not safe to do this for floating-point
2138 comparisons, except for NE_EXPR and EQ_EXPR. (Callers such as
2139 invert_truthvalue must exclude FP non-equality comparisons.) */
2140 static enum tree_code
2141 invert_tree_comparison (code)
2142 enum tree_code code;
2163 /* Similar, but return the comparison that results if the operands are
2164 swapped. This is safe for floating-point, since swapping operands
2165 never changes an ordered comparison into an unordered one. */
2166 static enum tree_code
2167 swap_tree_comparison (code)
2168 enum tree_code code;
2188 /* Return nonzero if CODE is a tree code that represents a truth value. */
2191 truth_value_p (code)
2192 enum tree_code code;
/* Truth values come from comparisons (class '<') and the logical
   TRUTH_* operators.  */
2194 return (TREE_CODE_CLASS (code) == '<'
2195 || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
2196 || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
2197 || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
2200 /* Return nonzero if two operands are necessarily equal.
2201 If ONLY_CONST is non-zero, only return non-zero for constants.
2202 This function tests whether the operands are indistinguishable;
2203 it does not test whether they are equal using C's == operation.
2204 The distinction is important for IEEE floating point, because
2205 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
2206 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
2209 operand_equal_p (arg0, arg1, only_const)
2213 /* If both types don't have the same signedness, then we can't consider
2214 them equal. We must check this before the STRIP_NOPS calls
2215 because they may change the signedness of the arguments. */
2216 if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
/* Different tree codes or machine modes can never match.  */
2222 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2223 /* This is needed for conversions and for COMPONENT_REF.
2224 Might as well play it safe and always test this. */
2225 || TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK
2226 || TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK
2227 || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
2230 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
2231 We don't care about side effects in that case because the SAVE_EXPR
2232 takes care of that for us. In all other cases, two expressions are
2233 equal if they have no side effects. If we have two identical
2234 expressions with side effects that should be treated the same due
2235 to the only side effects being identical SAVE_EXPR's, that will
2236 be detected in the recursive calls below. */
2237 if (arg0 == arg1 && ! only_const
2238 && (TREE_CODE (arg0) == SAVE_EXPR
2239 || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
2242 /* Next handle constant cases, those for which we can return 1 even
2243 if ONLY_CONST is set. */
2244 if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1))
2245 switch (TREE_CODE (arg0))
/* Integers: both words must match and neither may have overflowed.  */
2248 return (! TREE_CONSTANT_OVERFLOW (arg0)
2249 && ! TREE_CONSTANT_OVERFLOW (arg1)
2250 && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
2251 && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1));
/* Reals: bitwise-identical, not merely == (see header comment).  */
2254 return (! TREE_CONSTANT_OVERFLOW (arg0)
2255 && ! TREE_CONSTANT_OVERFLOW (arg1)
2256 && REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0),
2257 TREE_REAL_CST (arg1)));
2260 return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
2262 && operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1),
2266 return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1)
2267 && ! memcmp (TREE_STRING_POINTER (arg0),
2268 TREE_STRING_POINTER (arg1),
2269 TREE_STRING_LENGTH (arg0)));
2272 return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
/* Non-constant expressions: compare structurally by class.  */
2281 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
2284 /* Two conversions are equal only if signedness and modes match. */
2285 if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
2286 && (TREE_UNSIGNED (TREE_TYPE (arg0))
2287 != TREE_UNSIGNED (TREE_TYPE (arg1))))
2290 return operand_equal_p (TREE_OPERAND (arg0, 0),
2291 TREE_OPERAND (arg1, 0), 0);
/* Binary operators: first try operands in the same order.  */
2295 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0)
2296 && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1),
2300 /* For commutative ops, allow the other order. */
2301 return ((TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MULT_EXPR
2302 || TREE_CODE (arg0) == MIN_EXPR || TREE_CODE (arg0) == MAX_EXPR
2303 || TREE_CODE (arg0) == BIT_IOR_EXPR
2304 || TREE_CODE (arg0) == BIT_XOR_EXPR
2305 || TREE_CODE (arg0) == BIT_AND_EXPR
2306 || TREE_CODE (arg0) == NE_EXPR || TREE_CODE (arg0) == EQ_EXPR)
2307 && operand_equal_p (TREE_OPERAND (arg0, 0),
2308 TREE_OPERAND (arg1, 1), 0)
2309 && operand_equal_p (TREE_OPERAND (arg0, 1),
2310 TREE_OPERAND (arg1, 0), 0));
2313 /* If either of the pointer (or reference) expressions we are dereferencing
2314 contain a side effect, these cannot be equal. */
2315 if (TREE_SIDE_EFFECTS (arg0)
2316 || TREE_SIDE_EFFECTS (arg1))
2319 switch (TREE_CODE (arg0))
2322 return operand_equal_p (TREE_OPERAND (arg0, 0),
2323 TREE_OPERAND (arg1, 0), 0);
2327 return (operand_equal_p (TREE_OPERAND (arg0, 0),
2328 TREE_OPERAND (arg1, 0), 0)
2329 && operand_equal_p (TREE_OPERAND (arg0, 1),
2330 TREE_OPERAND (arg1, 1), 0));
2333 return (operand_equal_p (TREE_OPERAND (arg0, 0),
2334 TREE_OPERAND (arg1, 0), 0)
2335 && operand_equal_p (TREE_OPERAND (arg0, 1),
2336 TREE_OPERAND (arg1, 1), 0)
2337 && operand_equal_p (TREE_OPERAND (arg0, 2),
2338 TREE_OPERAND (arg1, 2), 0));
/* RTL_EXPRs compare by their generated RTL.  */
2344 if (TREE_CODE (arg0) == RTL_EXPR)
2345 return rtx_equal_p (RTL_EXPR_RTL (arg0), RTL_EXPR_RTL (arg1));
2353 /* Similar to operand_equal_p, but see if ARG0 might have been made by
2354 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
2356 When in doubt, return 0. */
2359 operand_equal_for_comparison_p (arg0, arg1, other)
2363 int unsignedp1, unsignedpo;
2364 tree primarg0, primarg1, primother;
2365 unsigned correct_width;
/* Exact structural equality is sufficient.  */
2367 if (operand_equal_p (arg0, arg1, 0))
2370 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
2371 || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
2374 /* Discard any conversions that don't change the modes of ARG0 and ARG1
2375 and see if the inner values are the same. This removes any
2376 signedness comparison, which doesn't matter here. */
2377 primarg0 = arg0, primarg1 = arg1;
2378 STRIP_NOPS (primarg0); STRIP_NOPS (primarg1);
2379 if (operand_equal_p (primarg0, primarg1, 0))
2382 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
2383 actual comparison operand, ARG0.
2385 First throw away any conversions to wider types
2386 already present in the operands. */
2388 primarg1 = get_narrower (arg1, &unsignedp1);
2389 primother = get_narrower (other, &unsignedpo);
2391 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
2392 if (unsignedp1 == unsignedpo
2393 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
2394 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
2396 tree type = TREE_TYPE (arg0);
2398 /* Make sure shorter operand is extended the right way
2399 to match the longer operand. */
2400 primarg1 = convert (signed_or_unsigned_type (unsignedp1,
2401 TREE_TYPE (primarg1)),
/* If re-widening the narrowed ARG1 reproduces ARG0, they match.  */
2404 if (operand_equal_p (arg0, convert (type, primarg1), 0))
2411 /* See if ARG is an expression that is either a comparison or is performing
2412 arithmetic on comparisons. The comparisons must only be comparing
2413 two different values, which will be stored in *CVAL1 and *CVAL2; if
2414 they are non-zero it means that some operands have already been found.
2415 No variables may be used anywhere else in the expression except in the
2416 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
2417 the expression and save_expr needs to be called with CVAL1 and CVAL2.
2419 If this is true, return 1. Otherwise, return zero. */
2422 twoval_comparison_p (arg, cval1, cval2, save_p)
2424 tree *cval1, *cval2;
2427 enum tree_code code = TREE_CODE (arg);
2428 char class = TREE_CODE_CLASS (code);
2430 /* We can handle some of the 'e' cases here. */
2431 if (class == 'e' && code == TRUTH_NOT_EXPR)
2433 else if (class == 'e'
2434 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
2435 || code == COMPOUND_EXPR))
2438 else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0
2439 && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0)))
2441 /* If we've already found a CVAL1 or CVAL2, this expression is
2442 too complex to handle. */
2443 if (*cval1 || *cval2)
/* Unary: only the single operand needs to qualify.  */
2453 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
/* Binary: both operands must qualify.  */
2456 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
2457 && twoval_comparison_p (TREE_OPERAND (arg, 1),
2458 cval1, cval2, save_p));
2464 if (code == COND_EXPR)
2465 return (twoval_comparison_p (TREE_OPERAND (arg, 0),
2466 cval1, cval2, save_p)
2467 && twoval_comparison_p (TREE_OPERAND (arg, 1),
2468 cval1, cval2, save_p)
2469 && twoval_comparison_p (TREE_OPERAND (arg, 2),
2470 cval1, cval2, save_p));
2474 /* First see if we can handle the first operand, then the second. For
2475 the second operand, we know *CVAL1 can't be zero. It must be that
2476 one side of the comparison is each of the values; test for the
2477 case where this isn't true by failing if the two operands
2480 if (operand_equal_p (TREE_OPERAND (arg, 0),
2481 TREE_OPERAND (arg, 1), 0))
/* Record (or match) the first operand against CVAL1/CVAL2.  */
2485 *cval1 = TREE_OPERAND (arg, 0);
2486 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
2488 else if (*cval2 == 0)
2489 *cval2 = TREE_OPERAND (arg, 0);
2490 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
/* And likewise for the second operand.  */
2495 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
2497 else if (*cval2 == 0)
2498 *cval2 = TREE_OPERAND (arg, 1);
2499 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
2511 /* ARG is a tree that is known to contain just arithmetic operations and
2512 comparisons. Evaluate the operations in the tree substituting NEW0 for
2513 any occurrence of OLD0 as an operand of a comparison and likewise for
2517 eval_subst (arg, old0, new0, old1, new1)
2519 tree old0, new0, old1, new1;
2521 tree type = TREE_TYPE (arg);
2522 enum tree_code code = TREE_CODE (arg);
2523 char class = TREE_CODE_CLASS (code);
2525 /* We can handle some of the 'e' cases here. */
2526 if (class == 'e' && code == TRUTH_NOT_EXPR)
2528 else if (class == 'e'
2529 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
/* Unary: rebuild with the substituted operand and fold.  */
2535 return fold (build1 (code, type,
2536 eval_subst (TREE_OPERAND (arg, 0),
2537 old0, new0, old1, new1)));
/* Binary: substitute into both operands, then fold.  */
2540 return fold (build (code, type,
2541 eval_subst (TREE_OPERAND (arg, 0),
2542 old0, new0, old1, new1),
2543 eval_subst (TREE_OPERAND (arg, 1),
2544 old0, new0, old1, new1)));
2550 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
2553 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
/* Ternary (e.g. COND_EXPR): substitute into all three operands.  */
2556 return fold (build (code, type,
2557 eval_subst (TREE_OPERAND (arg, 0),
2558 old0, new0, old1, new1),
2559 eval_subst (TREE_OPERAND (arg, 1),
2560 old0, new0, old1, new1),
2561 eval_subst (TREE_OPERAND (arg, 2),
2562 old0, new0, old1, new1)));
2566 /* fall through - ??? */
/* Comparison: this is where the actual OLD->NEW substitution of
   comparison operands happens.  */
2570 tree arg0 = TREE_OPERAND (arg, 0);
2571 tree arg1 = TREE_OPERAND (arg, 1);
2573 /* We need to check both for exact equality and tree equality. The
2574 former will be true if the operand has a side-effect. In that
2575 case, we know the operand occurred exactly once. */
2577 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
2579 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
2582 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
2584 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
2587 return fold (build (code, type, arg0, arg1));
2595 /* Return a tree for the case when the result of an expression is RESULT
2596 converted to TYPE and OMITTED was previously an operand of the expression
2597 but is now not needed (e.g., we folded OMITTED * 0).
2599 If OMITTED has side effects, we must evaluate it. Otherwise, just do
2600 the conversion of RESULT to TYPE. */
2603 omit_one_operand (type, result, omitted)
2604 tree type, result, omitted;
2606 tree t = convert (type, result);
/* Keep OMITTED's side effects by sequencing it before the result.  */
2608 if (TREE_SIDE_EFFECTS (omitted))
2609 return build (COMPOUND_EXPR, type, omitted, t);
2611 return non_lvalue (t);
2614 /* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
2617 pedantic_omit_one_operand (type, result, omitted)
2618 tree type, result, omitted;
2620 tree t = convert (type, result);
/* Keep OMITTED's side effects by sequencing it before the result.  */
2622 if (TREE_SIDE_EFFECTS (omitted))
2623 return build (COMPOUND_EXPR, type, omitted, t);
2625 return pedantic_non_lvalue (t);
2630 /* Return a simplified tree node for the truth-negation of ARG. This
2631 never alters ARG itself. We assume that ARG is an operation that
2632 returns a truth value (0 or 1). */
2635 invert_truthvalue (arg)
2638 tree type = TREE_TYPE (arg);
2639 enum tree_code code = TREE_CODE (arg);
2641 if (code == ERROR_MARK)
2644 /* If this is a comparison, we can simply invert it, except for
2645 floating-point non-equality comparisons, in which case we just
2646 enclose a TRUTH_NOT_EXPR around what we have. */
2648 if (TREE_CODE_CLASS (code) == '<')
2650 if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
2651 && !flag_fast_math && code != NE_EXPR && code != EQ_EXPR)
2652 return build1 (TRUTH_NOT_EXPR, type, arg);
2654 return build (invert_tree_comparison (code), type,
2655 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
/* An integer constant inverts to 1 iff it was zero.  */
2661 return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
2662 && TREE_INT_CST_HIGH (arg) == 0, 0));
2664 case TRUTH_AND_EXPR:
/* De Morgan: !(a & b) == !a | !b, and dually for OR below.  */
2665 return build (TRUTH_OR_EXPR, type,
2666 invert_truthvalue (TREE_OPERAND (arg, 0)),
2667 invert_truthvalue (TREE_OPERAND (arg, 1)));
2670 return build (TRUTH_AND_EXPR, type,
2671 invert_truthvalue (TREE_OPERAND (arg, 0)),
2672 invert_truthvalue (TREE_OPERAND (arg, 1)));
2674 case TRUTH_XOR_EXPR:
2675 /* Here we can invert either operand. We invert the first operand
2676 unless the second operand is a TRUTH_NOT_EXPR in which case our
2677 result is the XOR of the first operand with the inside of the
2678 negation of the second operand. */
2680 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2681 return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2682 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2684 return build (TRUTH_XOR_EXPR, type,
2685 invert_truthvalue (TREE_OPERAND (arg, 0)),
2686 TREE_OPERAND (arg, 1));
2688 case TRUTH_ANDIF_EXPR:
/* De Morgan for the short-circuit forms; evaluation order of the
   first operand is preserved.  */
2689 return build (TRUTH_ORIF_EXPR, type,
2690 invert_truthvalue (TREE_OPERAND (arg, 0)),
2691 invert_truthvalue (TREE_OPERAND (arg, 1)));
2693 case TRUTH_ORIF_EXPR:
2694 return build (TRUTH_ANDIF_EXPR, type,
2695 invert_truthvalue (TREE_OPERAND (arg, 0)),
2696 invert_truthvalue (TREE_OPERAND (arg, 1)));
2698 case TRUTH_NOT_EXPR:
/* Double negation cancels.  */
2699 return TREE_OPERAND (arg, 0);
/* For COND_EXPR, push the negation into both arms.  */
2702 return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
2703 invert_truthvalue (TREE_OPERAND (arg, 1)),
2704 invert_truthvalue (TREE_OPERAND (arg, 2)));
/* For COMPOUND_EXPR, only the value (second) operand is negated.  */
2707 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2708 invert_truthvalue (TREE_OPERAND (arg, 1)));
2710 case WITH_RECORD_EXPR:
2711 return build (WITH_RECORD_EXPR, type,
2712 invert_truthvalue (TREE_OPERAND (arg, 0)),
2713 TREE_OPERAND (arg, 1));
2715 case NON_LVALUE_EXPR:
2716 return invert_truthvalue (TREE_OPERAND (arg, 0));
/* Pass the negation through single-operand wrappers.  */
2721 return build1 (TREE_CODE (arg), type,
2722 invert_truthvalue (TREE_OPERAND (arg, 0)));
2725 if (!integer_onep (TREE_OPERAND (arg, 1)))
2727 return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
2730 return build1 (TRUTH_NOT_EXPR, type, arg);
2732 case CLEANUP_POINT_EXPR:
2733 return build1 (CLEANUP_POINT_EXPR, type,
2734 invert_truthvalue (TREE_OPERAND (arg, 0)));
/* Fallback: anything not matched above must be a boolean-typed
   expression; wrap it in TRUTH_NOT_EXPR.  */
2739 if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
2741 return build1 (TRUTH_NOT_EXPR, type, arg);
2744 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2745 operands are another bit-wise operation with a common input. If so,
2746 distribute the bit operations to save an operation and possibly two if
2747 constants are involved. For example, convert
2748 (A | B) & (A | C) into A | (B & C)
2749 Further simplification will occur if B and C are constants.
2751 If this optimization cannot be done, 0 will be returned. */
2754 distribute_bit_expr (code, type, arg0, arg1)
2755 enum tree_code code;
/* Both operands must be the same kind of AND/IOR and must differ
   from CODE itself (otherwise nothing distributes).  */
2762 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2763 || TREE_CODE (arg0) == code
2764 || (TREE_CODE (arg0) != BIT_AND_EXPR
2765 && TREE_CODE (arg0) != BIT_IOR_EXPR))
/* Find the common operand, trying all four operand pairings.  */
2768 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
2770 common = TREE_OPERAND (arg0, 0);
2771 left = TREE_OPERAND (arg0, 1);
2772 right = TREE_OPERAND (arg1, 1);
2774 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
2776 common = TREE_OPERAND (arg0, 0);
2777 left = TREE_OPERAND (arg0, 1);
2778 right = TREE_OPERAND (arg1, 0);
2780 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
2782 common = TREE_OPERAND (arg0, 1);
2783 left = TREE_OPERAND (arg0, 0);
2784 right = TREE_OPERAND (arg1, 1);
2786 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
2788 common = TREE_OPERAND (arg0, 1);
2789 left = TREE_OPERAND (arg0, 0);
2790 right = TREE_OPERAND (arg1, 0);
/* Build COMMON op (LEFT code RIGHT), folding the inner operation so
   constant LEFT/RIGHT collapse immediately.  */
2795 return fold (build (TREE_CODE (arg0), type, common,
2796 fold (build (code, type, left, right))));
2799 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2800 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2803 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
2806 int bitsize, bitpos;
/* Build the reference node; size and position become tree constants.  */
2809 tree result = build (BIT_FIELD_REF, type, inner,
2810 size_int (bitsize), bitsize_int (bitpos, 0L));
/* Record the requested signedness on the new node before returning it
   (the return statement itself is elided from this excerpt).  */
2812 TREE_UNSIGNED (result) = unsignedp;
2817 /* Optimize a bit-field compare.
2819 There are two cases: First is a compare against a constant and the
2820 second is a comparison of two items where the fields are at the same
2821 bit position relative to the start of a chunk (byte, halfword, word)
2822 large enough to contain it. In these cases we can avoid the shift
2823 implicit in bitfield extractions.
2825 For constants, we emit a compare of the shifted constant with the
2826 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2827 compared. For two fields at the same position, we do the ANDs with the
2828 similar mask and compare the result of the ANDs.
2830 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2831 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2832 are the left and right operands of the comparison, respectively.
2834 If the optimization described above can be done, we return the resulting
2835 tree. Otherwise we return zero. */
2838 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2839 enum tree_code code;
2843 int lbitpos, lbitsize, rbitpos, rbitsize, nbitpos, nbitsize;
2844 tree type = TREE_TYPE (lhs);
2845 tree signed_type, unsigned_type;
2846 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2847 enum machine_mode lmode, rmode, nmode;
2848 int lunsignedp, runsignedp;
2849 int lvolatilep = 0, rvolatilep = 0;
2851 tree linner, rinner = NULL_TREE;
2855 /* Get all the information about the extractions being done. If the bit size
2856 if the same as the size of the underlying object, we aren't doing an
2857 extraction at all and so can do nothing. We also don't want to
2858 do anything if the inner expression is a PLACEHOLDER_EXPR since we
2859 then will no longer be able to replace it. */
2860 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2861 &lunsignedp, &lvolatilep, &alignment);
2862 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2863 || offset != 0 || TREE_CODE (linner) == PLACEHOLDER_EXPR)
2868 /* If this is not a constant, we can only do something if bit positions,
2869 sizes, and signedness are the same. */
2870 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode,
2871 &runsignedp, &rvolatilep, &alignment);
2873 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
2874 || lunsignedp != runsignedp || offset != 0
2875 || TREE_CODE (rinner) == PLACEHOLDER_EXPR)
2879 /* See if we can find a mode to refer to this field. We should be able to,
2880 but fail if we can't. */
2881 nmode = get_best_mode (lbitsize, lbitpos,
2882 const_p ? TYPE_ALIGN (TREE_TYPE (linner))
2883 : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
2884 TYPE_ALIGN (TREE_TYPE (rinner))),
2885 word_mode, lvolatilep || rvolatilep);
2886 if (nmode == VOIDmode)
2889 /* Set signed and unsigned types of the precision of this mode for the
2891 signed_type = type_for_mode (nmode, 0);
2892 unsigned_type = type_for_mode (nmode, 1);
2894 /* Compute the bit position and size for the new reference and our offset
2895 within it. If the new reference is the same size as the original, we
2896 won't optimize anything, so return zero. */
2897 nbitsize = GET_MODE_BITSIZE (nmode);
2898 nbitpos = lbitpos & ~ (nbitsize - 1);
2900 if (nbitsize == lbitsize)
/* On big-endian targets the field's bit position counts from the other
   end of the containing unit, so re-express it.  */
2903 if (BYTES_BIG_ENDIAN)
2904 lbitpos = nbitsize - lbitsize - lbitpos;
2906 /* Make the mask to be used against the extracted field. */
/* Shift an all-ones constant left then right so that exactly LBITSIZE
   one bits remain, positioned at LBITPOS within the NBITSIZE unit.  */
2907 mask = build_int_2 (~0, ~0);
2908 TREE_TYPE (mask) = unsigned_type;
2909 force_fit_type (mask, 0);
2910 mask = convert (unsigned_type, mask);
2911 mask = const_binop (LSHIFT_EXPR, mask, size_int (nbitsize - lbitsize), 0);
2912 mask = const_binop (RSHIFT_EXPR, mask,
2913 size_int (nbitsize - lbitsize - lbitpos), 0);
2916 /* If not comparing with constant, just rework the comparison
/* Two-field case: compare (word0 & mask) against (word1 & mask).  */
2918 return build (code, compare_type,
2919 build (BIT_AND_EXPR, unsigned_type,
2920 make_bit_field_ref (linner, unsigned_type,
2921 nbitsize, nbitpos, 1),
2923 build (BIT_AND_EXPR, unsigned_type,
2924 make_bit_field_ref (rinner, unsigned_type,
2925 nbitsize, nbitpos, 1),
2928 /* Otherwise, we are handling the constant case. See if the constant is too
2929 big for the field. Warn and return a tree of for 0 (false) if so. We do
2930 this not only for its own sake, but to avoid having to test for this
2931 error case below. If we didn't, we might generate wrong code.
2933 For unsigned fields, the constant shifted right by the field length should
2934 be all zero. For signed fields, the high-order bits should agree with
/* Unsigned field: RHS shifted right by the field width must be zero,
   otherwise the comparison result is statically known.  */
2939 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2940 convert (unsigned_type, rhs),
2941 size_int (lbitsize), 0)))
2943 warning ("comparison is always %d due to width of bitfield",
2945 return convert (compare_type,
2947 ? integer_one_node : integer_zero_node));
/* Signed field: the bits above the field must all equal the field's
   sign bit (all zero or all one after an arithmetic shift).  */
2952 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2953 size_int (lbitsize - 1), 0);
2954 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2956 warning ("comparison is always %d due to width of bitfield",
2958 return convert (compare_type,
2960 ? integer_one_node : integer_zero_node));
2964 /* Single-bit compares should always be against zero. */
2965 if (lbitsize == 1 && ! integer_zerop (rhs))
2967 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2968 rhs = convert (type, integer_zero_node);
2971 /* Make a new bitfield reference, shift the constant over the
2972 appropriate number of bits and mask it with the computed mask
2973 (in case this was a signed field). If we changed it, make a new one. */
2974 lhs = make_bit_field_ref (linner, unsigned_type, nbitsize, nbitpos, 1);
/* Propagate volatility flags to the new reference (the guarding
   condition for this is elided from this excerpt).  */
2977 TREE_SIDE_EFFECTS (lhs) = 1;
2978 TREE_THIS_VOLATILE (lhs) = 1;
2981 rhs = fold (const_binop (BIT_AND_EXPR,
2982 const_binop (LSHIFT_EXPR,
2983 convert (unsigned_type, rhs),
2984 size_int (lbitpos), 0),
/* Final form: (word & mask) CODE shifted-and-masked constant.  */
2987 return build (code, compare_type,
2988 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2992 /* Subroutine for fold_truthop: decode a field reference.
2994 If EXP is a comparison reference, we return the innermost reference.
2996 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2997 set to the starting bit number.
2999 If the innermost field can be completely contained in a mode-sized
3000 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
3002 *PVOLATILEP is set to 1 if the any expression encountered is volatile;
3003 otherwise it is not changed.
3005 *PUNSIGNEDP is set to the signedness of the field.
3007 *PMASK is set to the mask used. This is either contained in a
3008 BIT_AND_EXPR or derived from the width of the field.
3010 *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.
3012 Return 0 if this is not a component reference or is one that we can't
3013 do anything with. */
3016 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
3017 pvolatilep, pmask, pand_mask)
3019 int *pbitsize, *pbitpos;
3020 enum machine_mode *pmode;
3021 int *punsignedp, *pvolatilep;
3026 tree mask, inner, offset;
3031 /* All the optimizations using this function assume integer fields.
3032 There are problems with FP fields since the type_for_size call
3033 below can fail for, e.g., XFmode. */
3034 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
/* Peel off an outer BIT_AND_EXPR, remembering its constant mask in
   AND_MASK; only constant masks are handled.  */
3039 if (TREE_CODE (exp) == BIT_AND_EXPR)
3041 and_mask = TREE_OPERAND (exp, 1);
3042 exp = TREE_OPERAND (exp, 0);
3043 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
3044 if (TREE_CODE (and_mask) != INTEGER_CST)
3049 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
3050 punsignedp, pvolatilep, &alignment);
/* Reject: not a reference at all (unless there was a mask), a negative
   size, a variable offset, or a PLACEHOLDER_EXPR we must not touch.  */
3051 if ((inner == exp && and_mask == 0)
3052 || *pbitsize < 0 || offset != 0
3053 || TREE_CODE (inner) == PLACEHOLDER_EXPR)
3056 /* Compute the mask to access the bitfield. */
3057 unsigned_type = type_for_size (*pbitsize, 1);
3058 precision = TYPE_PRECISION (unsigned_type);
/* All-ones shifted left then right by PRECISION - *PBITSIZE yields
   exactly *PBITSIZE low-order one bits.  */
3060 mask = build_int_2 (~0, ~0);
3061 TREE_TYPE (mask) = unsigned_type;
3062 force_fit_type (mask, 0);
3063 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
3064 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
3066 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
3068 mask = fold (build (BIT_AND_EXPR, unsigned_type,
3069 convert (unsigned_type, and_mask), mask));
/* Report the raw BIT_AND_EXPR mask (if any) to the caller as well.  */
3072 *pand_mask = and_mask;
3076 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
3080 all_ones_mask_p (mask, size)
3084 tree type = TREE_TYPE (mask);
3085 int precision = TYPE_PRECISION (type);
/* Build an all-ones constant in the signed variant of TYPE so the
   shifts below behave predictably.  */
3088 tmask = build_int_2 (~0, ~0);
3089 TREE_TYPE (tmask) = signed_type (type);
3090 force_fit_type (tmask, 0);
/* MASK matches iff it equals all-ones shifted left then right by
   PRECISION - SIZE, i.e. exactly SIZE low-order one bits.  */
3092 tree_int_cst_equal (mask,
3093 const_binop (RSHIFT_EXPR,
3094 const_binop (LSHIFT_EXPR, tmask,
3095 size_int (precision - size),
3097 size_int (precision - size), 0));
3100 /* Subroutine for fold_truthop: determine if an operand is simple enough
3101 to be evaluated unconditionally. */
3104 simple_operand_p (exp)
3107 /* Strip any conversions that don't change the machine mode. */
3108 while ((TREE_CODE (exp) == NOP_EXPR
3109 || TREE_CODE (exp) == CONVERT_EXPR)
3110 && (TYPE_MODE (TREE_TYPE (exp))
3111 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
3112 exp = TREE_OPERAND (exp, 0);
/* "Simple" means a constant (class 'c') or a declaration (class 'd')
   that is cheap and safe to load unconditionally.  */
3114 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
3115 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
3116 && ! TREE_ADDRESSABLE (exp)
3117 && ! TREE_THIS_VOLATILE (exp)
3118 && ! DECL_NONLOCAL (exp)
3119 /* Don't regard global variables as simple. They may be
3120 allocated in ways unknown to the compiler (shared memory,
3121 #pragma weak, etc). */
3122 && ! TREE_PUBLIC (exp)
3123 && ! DECL_EXTERNAL (exp)
3124 /* Loading a static variable is unduly expensive, but global
3125 registers aren't expensive. */
3126 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
3129 /* The following functions are subroutines to fold_range_test and allow it to
3130 try to change a logical combination of comparisons into a range test.
3133 X == 2 && X == 3 && X == 4 && X == 5
3137 (unsigned) (X - 2) <= 3
3139 We describe each set of comparisons as being either inside or outside
3140 a range, using a variable named like IN_P, and then describe the
3141 range with a lower and upper bound. If one of the bounds is omitted,
3142 it represents either the highest or lowest value of the type.
3144 In the comments below, we represent a range by two numbers in brackets
3145 preceded by a "+" to designate being inside that range, or a "-" to
3146 designate being outside that range, so the condition can be inverted by
3147 flipping the prefix. An omitted bound is represented by a "-". For
3148 example, "- [-, 10]" means being outside the range starting at the lowest
3149 possible value and ending at 10, in other words, being greater than 10.
3150 The range "+ [-, -]" is always true and hence the range "- [-, -]" is
3153 We set up things so that the missing bounds are handled in a consistent
3154 manner so neither a missing bound nor "true" and "false" need to be
3155 handled using a special case. */
3157 /* Return the result of applying CODE to ARG0 and ARG1, but handle the case
3158 of ARG0 and/or ARG1 being omitted, meaning an unlimited range. UPPER0_P
3159 and UPPER1_P are nonzero if the respective argument is an upper bound
3160 and zero for a lower. TYPE, if nonzero, is the type of the result; it
3161 must be specified for a comparison. ARG1 will be converted to ARG0's
3162 type if both are specified. */
3165 range_binop (code, type, arg0, upper0_p, arg1, upper1_p)
3166 enum tree_code code;
3169 int upper0_p, upper1_p;
3175 /* If neither arg represents infinity, do the normal operation.
3176 Else, if not a comparison, return infinity. Else handle the special
3177 comparison rules. Note that most of the cases below won't occur, but
3178 are handled for consistency. */
3180 if (arg0 != 0 && arg1 != 0)
3182 tem = fold (build (code, type != 0 ? type : TREE_TYPE (arg0),
3183 arg0, convert (TREE_TYPE (arg0), arg1)));
/* Only a fully-folded constant result is useful; otherwise fail.  */
3185 return TREE_CODE (tem) == INTEGER_CST ? tem : 0;
3188 if (TREE_CODE_CLASS (code) != '<')
3191 /* Set SGN[01] to -1 if ARG[01] is a lower bound, 1 for upper, and 0
/* An omitted lower bound acts like -infinity (-1) and an omitted upper
   bound like +infinity (1); a present bound is "finite" (0).  */
3197 sgn0 = arg0 != 0 ? 0 : (upper0_p ? 1 : -1);
3198 sgn1 = arg1 != 0 ? 0 : (upper1_p ? 1 : -1);
/* NOTE(review): the switch on CODE and its case labels are elided from
   this excerpt; each visible arm compares the infinity codes with the
   comparison presumably matching its case label — confirm against the
   full source.  */
3202 result = sgn0 == sgn1;
3205 result = sgn0 != sgn1;
3208 result = sgn0 < sgn1;
3211 result = sgn0 <= sgn1;
3214 result = sgn0 > sgn1;
3217 result = sgn0 >= sgn1;
3223 return convert (type, result ? integer_one_node : integer_zero_node);
3226 /* Given EXP, a logical expression, set the range it is testing into
3227 variables denoted by PIN_P, PLOW, and PHIGH. Return the expression
3228 actually being tested. *PLOW and *PHIGH will have be made the same type
3229 as the returned expression. If EXP is not a comparison, we will most
3230 likely not be returning a useful value and range. */
3233 make_range (exp, pin_p, plow, phigh)
3238 enum tree_code code;
3239 tree arg0 = NULL_TREE, arg1 = NULL_TREE, type = NULL_TREE;
3240 tree orig_type = NULL_TREE;
3242 tree low, high, n_low, n_high;
3244 /* Start with simply saying "EXP != 0" and then look at the code of EXP
3245 and see if we can refine the range. Some of the cases below may not
3246 happen, but it doesn't seem worth worrying about this. We "continue"
3247 the outer loop when we've changed something; otherwise we "break"
3248 the switch, which will "break" the while. */
3250 in_p = 0, low = high = convert (TREE_TYPE (exp), integer_zero_node);
3254 code = TREE_CODE (exp);
/* Pick up ARG0/ARG1 and the operand type for expression codes; unary,
   binary, comparison and some expression ('e') classes carry operands.  */
3256 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
3258 arg0 = TREE_OPERAND (exp, 0);
3259 if (TREE_CODE_CLASS (code) == '<'
3260 || TREE_CODE_CLASS (code) == '1'
3261 || TREE_CODE_CLASS (code) == '2')
3262 type = TREE_TYPE (arg0);
3263 if (TREE_CODE_CLASS (code) == '2'
3264 || TREE_CODE_CLASS (code) == '<'
3265 || (TREE_CODE_CLASS (code) == 'e'
3266 && tree_code_length[(int) code] > 1))
3267 arg1 = TREE_OPERAND (exp, 1);
3270 /* Set ORIG_TYPE as soon as TYPE is non-null so that we do not
3271 lose a cast by accident. */
3272 if (type != NULL_TREE && orig_type == NULL_TREE)
3277 case TRUTH_NOT_EXPR:
3278 in_p = ! in_p, exp = arg0;
3281 case EQ_EXPR: case NE_EXPR:
3282 case LT_EXPR: case LE_EXPR: case GE_EXPR: case GT_EXPR:
3283 /* We can only do something if the range is testing for zero
3284 and if the second operand is an integer constant. Note that
3285 saying something is "in" the range we make is done by
3286 complementing IN_P since it will set in the initial case of
3287 being not equal to zero; "out" is leaving it alone. */
3288 if (low == 0 || high == 0
3289 || ! integer_zerop (low) || ! integer_zerop (high)
3290 || TREE_CODE (arg1) != INTEGER_CST)
/* Translate each comparison into the bracketed range it tests;
   see the "+/-" range notation described earlier in this file.  */
3295 case NE_EXPR: /* - [c, c] */
3298 case EQ_EXPR: /* + [c, c] */
3299 in_p = ! in_p, low = high = arg1;
3301 case GT_EXPR: /* - [-, c] */
3302 low = 0, high = arg1;
3304 case GE_EXPR: /* + [c, -] */
3305 in_p = ! in_p, low = arg1, high = 0;
3307 case LT_EXPR: /* - [c, -] */
3308 low = arg1, high = 0;
3310 case LE_EXPR: /* + [-, c] */
3311 in_p = ! in_p, low = 0, high = arg1;
3319 /* If this is an unsigned comparison, we also know that EXP is
3320 greater than or equal to zero. We base the range tests we make
3321 on that fact, so we record it here so we can parse existing
3323 if (TREE_UNSIGNED (type) && (low == 0 || high == 0))
3325 if (! merge_ranges (&n_in_p, &n_low, &n_high, in_p, low, high,
3326 1, convert (type, integer_zero_node),
3330 in_p = n_in_p, low = n_low, high = n_high;
3332 /* If the high bound is missing, but we
3333 have a low bound, reverse the range so
3334 it goes from zero to the low bound minus 1. */
3335 if (high == 0 && low)
3338 high = range_binop (MINUS_EXPR, NULL_TREE, low, 0,
3339 integer_one_node, 0);
3340 low = convert (type, integer_zero_node);
3346 /* (-x) IN [a,b] -> x in [-b, -a] */
3347 n_low = range_binop (MINUS_EXPR, type,
3348 convert (type, integer_zero_node), 0, high, 1);
3349 n_high = range_binop (MINUS_EXPR, type,
3350 convert (type, integer_zero_node), 0, low, 0);
3351 low = n_low, high = n_high;
/* NOTE(review): this rewrite of EXP as negate_expr(arg0) - 1 appears
   to belong to a BIT_NOT_EXPR case (~x = -x - 1); the case label is
   elided from this excerpt — confirm against the full source.  */
3357 exp = build (MINUS_EXPR, type, negate_expr (arg0),
3358 convert (type, integer_one_node));
3361 case PLUS_EXPR: case MINUS_EXPR:
3362 if (TREE_CODE (arg1) != INTEGER_CST)
3365 /* If EXP is signed, any overflow in the computation is undefined,
3366 so we don't worry about it so long as our computations on
3367 the bounds don't overflow. For unsigned, overflow is defined
3368 and this is exactly the right thing. */
3369 n_low = range_binop (code == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR,
3370 type, low, 0, arg1, 0);
3371 n_high = range_binop (code == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR,
3372 type, high, 1, arg1, 0);
3373 if ((n_low != 0 && TREE_OVERFLOW (n_low))
3374 || (n_high != 0 && TREE_OVERFLOW (n_high)))
3377 /* Check for an unsigned range which has wrapped around the maximum
3378 value thus making n_high < n_low, and normalize it. */
3379 if (n_low && n_high && tree_int_cst_lt (n_high, n_low))
3381 low = range_binop (PLUS_EXPR, type, n_high, 0,
3382 integer_one_node, 0);
3383 high = range_binop (MINUS_EXPR, type, n_low, 0,
3384 integer_one_node, 0);
3388 low = n_low, high = n_high;
3393 case NOP_EXPR: case NON_LVALUE_EXPR: case CONVERT_EXPR:
/* Refuse to look through widening casts; also require an integral
   type whose range can represent the current bounds.  */
3394 if (TYPE_PRECISION (type) > TYPE_PRECISION (orig_type))
3397 if (! INTEGRAL_TYPE_P (type)
3398 || (low != 0 && ! int_fits_type_p (low, type))
3399 || (high != 0 && ! int_fits_type_p (high, type)))
3402 n_low = low, n_high = high;
3405 n_low = convert (type, n_low);
3408 n_high = convert (type, n_high);
3410 /* If we're converting from an unsigned to a signed type,
3411 we will be doing the comparison as unsigned. The tests above
3412 have already verified that LOW and HIGH are both positive.
3414 So we have to make sure that the original unsigned value will
3415 be interpreted as positive. */
3416 if (TREE_UNSIGNED (type) && ! TREE_UNSIGNED (TREE_TYPE (exp)))
3418 tree equiv_type = type_for_mode (TYPE_MODE (type), 1);
3421 /* A range without an upper bound is, naturally, unbounded.
3422 Since convert would have cropped a very large value, use
3423 the max value for the destination type. */
3425 = TYPE_MAX_VALUE (equiv_type) ? TYPE_MAX_VALUE (equiv_type)
3426 : TYPE_MAX_VALUE (type);
/* Halve the max value: values above this would read as negative
   when reinterpreted in the signed type.  */
3428 high_positive = fold (build (RSHIFT_EXPR, type,
3429 convert (type, high_positive),
3430 convert (type, integer_one_node)));
3432 /* If the low bound is specified, "and" the range with the
3433 range for which the original unsigned value will be
3437 if (! merge_ranges (&n_in_p, &n_low, &n_high,
3439 1, convert (type, integer_zero_node),
3443 in_p = (n_in_p == in_p);
3447 /* Otherwise, "or" the range with the range of the input
3448 that will be interpreted as negative. */
3449 if (! merge_ranges (&n_in_p, &n_low, &n_high,
3451 1, convert (type, integer_zero_node),
3455 in_p = (in_p != n_in_p);
3460 low = n_low, high = n_high;
3470 /* If EXP is a constant, we can evaluate whether this is true or false. */
3471 if (TREE_CODE (exp) == INTEGER_CST)
3473 in_p = in_p == (integer_onep (range_binop (GE_EXPR, integer_type_node,
3475 && integer_onep (range_binop (LE_EXPR, integer_type_node,
/* Publish the accumulated range to the caller's out-parameters.  */
3481 *pin_p = in_p, *plow = low, *phigh = high;
3485 /* Given a range, LOW, HIGH, and IN_P, an expression, EXP, and a result
3486 type, TYPE, return an expression to test if EXP is in (or out of, depending
3487 on IN_P) the range. */
3490 build_range_check (type, exp, in_p, low, high)
3496 tree etype = TREE_TYPE (exp);
/* "Out of range" is handled by building the "in range" test and
   inverting it (the guarding condition is elided from this excerpt).  */
3500 && (0 != (value = build_range_check (type, exp, 1, low, high))))
3501 return invert_truthvalue (value);
/* No bounds at all: the test is always true.  */
3503 else if (low == 0 && high == 0)
3504 return convert (type, integer_one_node);
/* Only one bound present: a single comparison suffices.  */
3507 return fold (build (LE_EXPR, type, exp, high));
3510 return fold (build (GE_EXPR, type, exp, low));
/* Degenerate range [c, c]: a simple equality test.  */
3512 else if (operand_equal_p (low, high, 0))
3513 return fold (build (EQ_EXPR, type, exp, low));
3515 else if (TREE_UNSIGNED (etype) && integer_zerop (low))
3516 return build_range_check (type, exp, 1, 0, high);
/* Zero low bound on a signed EXP: redo the test in the unsigned type,
   turning [0, HIGH] into a single unsigned comparison.  */
3518 else if (integer_zerop (low))
3520 utype = unsigned_type (etype);
3521 return build_range_check (type, convert (utype, exp), 1, 0,
3522 convert (utype, high));
3525 else if (0 != (value = const_binop (MINUS_EXPR, high, low, 0))
3526 && ! TREE_OVERFLOW (value))
/* General case: shift the range down by LOW so it starts at zero,
   provided HIGH - LOW can be computed without overflow.  */
3527 return build_range_check (type,
3528 fold (build (MINUS_EXPR, etype, exp, low)),
3529 1, convert (etype, integer_zero_node), value);
3534 /* Given two ranges, see if we can merge them into one. Return 1 if we
3535 can, 0 if we can't. Set the output range into the specified parameters. */
3538 merge_ranges (pin_p, plow, phigh, in0_p, low0, high0, in1_p, low1, high1)
3542 tree low0, high0, low1, high1;
/* A null bound stands for the type's extreme value, so two null bounds
   compare equal without calling range_binop.  */
3550 int lowequal = ((low0 == 0 && low1 == 0)
3551 || integer_onep (range_binop (EQ_EXPR, integer_type_node,
3552 low0, 0, low1, 0)));
3553 int highequal = ((high0 == 0 && high1 == 0)
3554 || integer_onep (range_binop (EQ_EXPR, integer_type_node,
3555 high0, 1, high1, 1)));
3557 /* Make range 0 be the range that starts first, or ends last if they
3558 start at the same value. Swap them if it isn't. */
3559 if (integer_onep (range_binop (GT_EXPR, integer_type_node,
3562 && integer_onep (range_binop (GT_EXPR, integer_type_node,
3563 high1, 1, high0, 1))))
3565 temp = in0_p, in0_p = in1_p, in1_p = temp;
3566 tem = low0, low0 = low1, low1 = tem;
3567 tem = high0, high0 = high1, high1 = tem;
3570 /* Now flag two cases, whether the ranges are disjoint or whether the
3571 second range is totally subsumed in the first. Note that the tests
3572 below are simplified by the ones above. */
3573 no_overlap = integer_onep (range_binop (LT_EXPR, integer_type_node,
3574 high0, 1, low1, 0));
3575 subset = integer_onep (range_binop (LE_EXPR, integer_type_node,
3576 high1, 1, high0, 1));
3578 /* We now have four cases, depending on whether we are including or
3579 excluding the two ranges. */
3582 /* If they don't overlap, the result is false. If the second range
3583 is a subset it is the result. Otherwise, the range is from the start
3584 of the second to the end of the first. */
3586 in_p = 0, low = high = 0;
3588 in_p = 1, low = low1, high = high1;
3590 in_p = 1, low = low1, high = high0;
3593 else if (in0_p && ! in1_p)
3595 /* If they don't overlap, the result is the first range. If they are
3596 equal, the result is false. If the second range is a subset of the
3597 first, and the ranges begin at the same place, we go from just after
3598 the end of the first range to the end of the second. If the second
3599 range is not a subset of the first, or if it is a subset and both
3600 ranges end at the same place, the range starts at the start of the
3601 first range and ends just before the second range.
3602 Otherwise, we can't describe this as a single range. */
3604 in_p = 1, low = low0, high = high0;
3605 else if (lowequal && highequal)
3606 in_p = 0, low = high = 0;
3607 else if (subset && lowequal)
3609 in_p = 1, high = high0;
3610 low = range_binop (PLUS_EXPR, NULL_TREE, high1, 0,
3611 integer_one_node, 0);
3613 else if (! subset || highequal)
3615 in_p = 1, low = low0;
3616 high = range_binop (MINUS_EXPR, NULL_TREE, low1, 0,
3617 integer_one_node, 0);
3623 else if (! in0_p && in1_p)
3625 /* If they don't overlap, the result is the second range. If the second
3626 is a subset of the first, the result is false. Otherwise,
3627 the range starts just after the first range and ends at the
3628 end of the second. */
3630 in_p = 1, low = low1, high = high1;
3631 else if (subset || highequal)
3632 in_p = 0, low = high = 0;
3635 in_p = 1, high = high1;
3636 low = range_binop (PLUS_EXPR, NULL_TREE, high0, 1,
3637 integer_one_node, 0);
3643 /* The case where we are excluding both ranges. Here the complex case
3644 is if they don't overlap. In that case, the only time we have a
3645 range is if they are adjacent. If the second is a subset of the
3646 first, the result is the first. Otherwise, the range to exclude
3647 starts at the beginning of the first range and ends at the end of the
/* Adjacency test: HIGH0 + 1 == LOW1 means the two excluded ranges can
   be merged into a single excluded range [LOW0, HIGH1].  */
3651 if (integer_onep (range_binop (EQ_EXPR, integer_type_node,
3652 range_binop (PLUS_EXPR, NULL_TREE,
3654 integer_one_node, 1),
3656 in_p = 0, low = low0, high = high1;
3661 in_p = 0, low = low0, high = high0;
3663 in_p = 0, low = low0, high = high1;
/* Publish the merged range to the caller's out-parameters.  */
3666 *pin_p = in_p, *plow = low, *phigh = high;
3670 /* EXP is some logical combination of boolean tests. See if we can
3671 merge it into some range test. Return the new tree if so. */
3674 fold_range_test (exp)
3677 int or_op = (TREE_CODE (exp) == TRUTH_ORIF_EXPR
3678 || TREE_CODE (exp) == TRUTH_OR_EXPR);
3679 int in0_p, in1_p, in_p;
3680 tree low0, low1, low, high0, high1, high;
/* Decompose both operands into (expression, in_p, [low, high]) form.  */
3681 tree lhs = make_range (TREE_OPERAND (exp, 0), &in0_p, &low0, &high0);
3682 tree rhs = make_range (TREE_OPERAND (exp, 1), &in1_p, &low1, &high1);
3685 /* If this is an OR operation, invert both sides; we will invert
3686 again at the end. */
3688 in0_p = ! in0_p, in1_p = ! in1_p;
3690 /* If both expressions are the same, if we can merge the ranges, and we
3691 can build the range test, return it or it inverted. If one of the
3692 ranges is always true or always false, consider it to be the same
3693 expression as the other. */
3694 if ((lhs == 0 || rhs == 0 || operand_equal_p (lhs, rhs, 0))
3695 && merge_ranges (&in_p, &low, &high, in0_p, low0, high0,
3697 && 0 != (tem = (build_range_check (TREE_TYPE (exp),
3699 : rhs != 0 ? rhs : integer_zero_node,
3701 return or_op ? invert_truthvalue (tem) : tem;
3703 /* On machines where the branch cost is expensive, if this is a
3704 short-circuited branch and the underlying object on both sides
3705 is the same, make a non-short-circuit operation. */
3706 else if (BRANCH_COST >= 2
3707 && (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
3708 || TREE_CODE (exp) == TRUTH_ORIF_EXPR)
3709 && operand_equal_p (lhs, rhs, 0))
3711 /* If simple enough, just rewrite. Otherwise, make a SAVE_EXPR
3712 unless we are at top level or LHS contains a PLACEHOLDER_EXPR, in
3713 which cases we can't do this. */
3714 if (simple_operand_p (lhs))
3715 return build (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
3716 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
3717 TREE_TYPE (exp), TREE_OPERAND (exp, 0),
3718 TREE_OPERAND (exp, 1));
3720 else if (global_bindings_p () == 0
3721 && ! contains_placeholder_p (lhs))
/* Evaluate the shared subexpression once via SAVE_EXPR, then test
   both ranges against that single evaluation.  */
3723 tree common = save_expr (lhs);
3725 if (0 != (lhs = build_range_check (TREE_TYPE (exp), common,
3726 or_op ? ! in0_p : in0_p,
3728 && (0 != (rhs = build_range_check (TREE_TYPE (exp), common,
3729 or_op ? ! in1_p : in1_p,
3731 return build (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
3732 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
3733 TREE_TYPE (exp), lhs, rhs);
3740 /* Subroutine for fold_truthop: C is an INTEGER_CST interpreted as a P
3741 bit value. Arrange things so the extra bits will be set to zero if and
3742 only if C is signed-extended to its full width. If MASK is nonzero,
3743 it is an INTEGER_CST that should be AND'ed with the extra bits. */
3746 unextend (c, p, unsignedp, mask)
3752 tree type = TREE_TYPE (c);
3753 int modesize = GET_MODE_BITSIZE (TYPE_MODE (type));
/* Full-width or unsigned values need no adjustment.  */
3756 if (p == modesize || unsignedp)
3759 /* We work by getting just the sign bit into the low-order bit, then
3760 into the high-order bit, then sign-extend. We then XOR that value
/* Isolate the sign bit of the P-bit value into bit 0 of TEMP.  */
3762 temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0);
3763 temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0);
3765 /* We must use a signed type in order to get an arithmetic right shift.
3766 However, we must also avoid introducing accidental overflows, so that
3767 a subsequent call to integer_zerop will work. Hence we must
3768 do the type conversion here. At this point, the constant is either
3769 zero or one, and the conversion to a signed type can never overflow.
3770 We could get an overflow if this conversion is done anywhere else. */
3771 if (TREE_UNSIGNED (type))
3772 temp = convert (signed_type (type), temp);
/* Move the sign bit to the top, then arithmetic-shift it back down so
   that bits P..MODESIZE-1 of TEMP are copies of the sign bit.  */
3774 temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0);
3775 temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0);
/* Restrict the extension bits to MASK (applied conditionally; the
   guarding test on MASK is elided from this excerpt).  */
3777 temp = const_binop (BIT_AND_EXPR, temp, convert (TREE_TYPE (c), mask), 0);
3778 /* If necessary, convert the type back to match the type of C. */
3779 if (TREE_UNSIGNED (type))
3780 temp = convert (type, temp);
/* XOR the sign-extension pattern into C: the extra bits become zero
   exactly when C was already sign-extended.  */
3782 return convert (type, const_binop (BIT_XOR_EXPR, c, temp, 0));
3785 /* Find ways of folding logical expressions of LHS and RHS:
3786 Try to merge two comparisons to the same innermost item.
3787 Look for range tests like "ch >= '0' && ch <= '9'".
3788 Look for combinations of simple terms on machines with expensive branches
3789 and evaluate the RHS unconditionally.
3791 For example, if we have p->a == 2 && p->b == 4 and we can make an
3792 object large enough to span both A and B, we can do this with a comparison
3793 against the object ANDed with the a mask.
3795 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
3796 operations to do this with one comparison.
3798 We check for both normal comparisons and the BIT_AND_EXPRs made by this
3799 function and the one above.
3801 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
3802 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
3804 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
two operands (the continuation of this sentence is elided in this listing).
3807 We return the simplified tree or 0 if no optimization is possible. */
/* NOTE(review): this listing elides the original file's blank lines,
   braces, and a number of statements (visible as gaps in the embedded
   line numbers).  The `static tree' return type, argument-declaration
   lines, and the opening brace of fold_truthop are among the elided
   lines.  Comments below flag the elisions that hide executable code. */
3810 fold_truthop (code, truth_type, lhs, rhs)
3811 enum tree_code code;
3812 tree truth_type, lhs, rhs;
3814 /* If this is the "or" of two comparisons, we can do something if
3815 the comparisons are NE_EXPR. If this is the "and", we can do something
3816 if the comparisons are EQ_EXPR. I.e.,
3817 (a->b == 2 && a->c == 4) can become (a->new == NEW).
3819 WANTED_CODE is this operation code. For single bit fields, we can
3820 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
3821 comparison for one-bit fields. */
3823 enum tree_code wanted_code;
3824 enum tree_code lcode, rcode;
/* Naming scheme used below: the first letter (l/r) names which
   comparison (the lhs or rhs of the logical operator); the second
   letter names which operand of that comparison.  E.g. ll_arg is the
   left operand of the left comparison, as assigned from
   TREE_OPERAND (lhs, 0) further down. */
3825 tree ll_arg, lr_arg, rl_arg, rr_arg;
3826 tree ll_inner, lr_inner, rl_inner, rr_inner;
3827 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
3828 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
3829 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
3830 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
3831 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
3832 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
3833 enum machine_mode lnmode, rnmode;
3834 tree ll_mask, lr_mask, rl_mask, rr_mask;
3835 tree ll_and_mask, lr_and_mask, rl_and_mask, rr_and_mask;
3836 tree l_const, r_const;
3837 tree lntype, rntype, result;
3838 int first_bit, end_bit;
/* The declaration of `volatilep' (used below) is elided here.  */
3841 /* Start by getting the comparison codes. Fail if anything is volatile.
3842 If one operand is a BIT_AND_EXPR with the constant one, treat it as if
3843 it were surrounded with a NE_EXPR. */
/* The `return 0;' body of this guard is elided in this listing.  */
3845 if (TREE_SIDE_EFFECTS (lhs) || TREE_SIDE_EFFECTS (rhs))
3848 lcode = TREE_CODE (lhs);
3849 rcode = TREE_CODE (rhs);
3851 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
3852 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
3854 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
3855 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
/* Both sides must be comparisons (tree-code class '<'); the failing
   `return 0;' is elided here.  */
3857 if (TREE_CODE_CLASS (lcode) != '<' || TREE_CODE_CLASS (rcode) != '<')
/* Canonicalize the short-circuit forms: from here on only
   TRUTH_AND_EXPR / TRUTH_OR_EXPR are distinguished. */
3860 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
3861 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
3863 ll_arg = TREE_OPERAND (lhs, 0);
3864 lr_arg = TREE_OPERAND (lhs, 1);
3865 rl_arg = TREE_OPERAND (rhs, 0);
3866 rr_arg = TREE_OPERAND (rhs, 1);
3868 /* If the RHS can be evaluated unconditionally and its operands are
3869 simple, it wins to evaluate the RHS unconditionally on machines
3870 with expensive branches. In this case, this isn't a comparison
3871 that can be merged. Avoid doing this if the RHS is a floating-point
3872 comparison since those can trap. */
3874 if (BRANCH_COST >= 2
3875 && ! FLOAT_TYPE_P (TREE_TYPE (rl_arg))
3876 && simple_operand_p (rl_arg)
3877 && simple_operand_p (rr_arg))
3878 return build (code, truth_type, lhs, rhs);
3880 /* See if the comparisons can be merged. Then get all the parameters for
3883 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
3884 || (rcode != EQ_EXPR && rcode != NE_EXPR))
/* Each call decomposes one comparison operand into an inner object,
   bit position/size, machine mode, signedness, and mask.  The final
   argument of each call (the *_and_mask out-parameter) is on a line
   elided from this listing. */
3888 ll_inner = decode_field_reference (ll_arg,
3889 &ll_bitsize, &ll_bitpos, &ll_mode,
3890 &ll_unsignedp, &volatilep, &ll_mask,
3892 lr_inner = decode_field_reference (lr_arg,
3893 &lr_bitsize, &lr_bitpos, &lr_mode,
3894 &lr_unsignedp, &volatilep, &lr_mask,
3896 rl_inner = decode_field_reference (rl_arg,
3897 &rl_bitsize, &rl_bitpos, &rl_mode,
3898 &rl_unsignedp, &volatilep, &rl_mask,
3900 rr_inner = decode_field_reference (rr_arg,
3901 &rr_bitsize, &rr_bitpos, &rr_mode,
3902 &rr_unsignedp, &volatilep, &rr_mask,
3905 /* It must be true that the inner operation on the lhs of each
3906 comparison must be the same if we are to be able to do anything.
3907 Then see if we have constants. If not, the same must be true for
3909 if (volatilep || ll_inner == 0 || rl_inner == 0
3910 || ! operand_equal_p (ll_inner, rl_inner, 0))
3913 if (TREE_CODE (lr_arg) == INTEGER_CST
3914 && TREE_CODE (rr_arg) == INTEGER_CST)
3915 l_const = lr_arg, r_const = rr_arg;
3916 else if (lr_inner == 0 || rr_inner == 0
3917 || ! operand_equal_p (lr_inner, rr_inner, 0))
3920 l_const = r_const = 0;
3922 /* If either comparison code is not correct for our logical operation,
3923 fail. However, we can convert a one-bit comparison against zero into
3924 the opposite comparison against that bit being set in the field. */
3926 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
3927 if (lcode != wanted_code)
3929 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
3931 /* Make the left operand unsigned, since we are only interested
3932 in the value of one bit. Otherwise we are doing the wrong
/* The remainder of this one-bit conversion (and the failing return
   when it does not apply) is elided from this listing. */
3941 /* This is analogous to the code for l_const above. */
3942 if (rcode != wanted_code)
3944 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
3953 /* See if we can find a mode that contains both fields being compared on
3954 the left. If we can't, fail. Otherwise, update all constants and masks
3955 to be relative to a field of that size. */
3956 first_bit = MIN (ll_bitpos, rl_bitpos);
3957 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
3958 lnmode = get_best_mode (end_bit - first_bit, first_bit,
3959 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
3961 if (lnmode == VOIDmode)
3964 lnbitsize = GET_MODE_BITSIZE (lnmode);
/* Round the start position down to a multiple of the chosen mode's
   width; lnbitsize is a power of two here, making the mask valid. */
3965 lnbitpos = first_bit & ~ (lnbitsize - 1);
3966 lntype = type_for_size (lnbitsize, 1);
3967 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
3969 if (BYTES_BIG_ENDIAN)
3971 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
3972 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
3975 ll_mask = const_binop (LSHIFT_EXPR, convert (lntype, ll_mask),
3976 size_int (xll_bitpos), 0);
3977 rl_mask = const_binop (LSHIFT_EXPR, convert (lntype, rl_mask),
3978 size_int (xrl_bitpos), 0);
/* The `if (l_const)' guard around the next group is elided here.  */
3982 l_const = convert (lntype, l_const);
3983 l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask);
3984 l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
3985 if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
3986 fold (build1 (BIT_NOT_EXPR,
/* The rest of the BIT_NOT_EXPR argument (lntype, ll_mask) is elided. */
3990 warning ("comparison is always %d", wanted_code == NE_EXPR)
3992 return convert (truth_type,
3993 wanted_code == NE_EXPR
3994 ? integer_one_node : integer_zero_node);
3999 r_const = convert (lntype, r_const);
4000 r_const = unextend (r_const, rl_bitsize, rl_unsignedp, rl_and_mask);
4001 r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0);
4002 if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const,
4003 fold (build1 (BIT_NOT_EXPR,
4007 warning ("comparison is always %d", wanted_code == NE_EXPR)
4009 return convert (truth_type,
4010 wanted_code == NE_EXPR
4011 ? integer_one_node : integer_zero_node);
4015 /* If the right sides are not constant, do the same for it. Also,
4016 disallow this optimization if a size or signedness mismatch occurs
4017 between the left and right sides. */
/* The enclosing `if (l_const == 0)' line is elided from this listing. */
4020 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
4021 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
4022 /* Make sure the two fields on the right
4023 correspond to the left without being swapped. */
4024 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
4027 first_bit = MIN (lr_bitpos, rr_bitpos);
4028 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
4029 rnmode = get_best_mode (end_bit - first_bit, first_bit,
4030 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
4032 if (rnmode == VOIDmode)
4035 rnbitsize = GET_MODE_BITSIZE (rnmode);
4036 rnbitpos = first_bit & ~ (rnbitsize - 1);
4037 rntype = type_for_size (rnbitsize, 1);
4038 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
4040 if (BYTES_BIG_ENDIAN)
4042 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
4043 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
4046 lr_mask = const_binop (LSHIFT_EXPR, convert (rntype, lr_mask),
4047 size_int (xlr_bitpos), 0);
4048 rr_mask = const_binop (LSHIFT_EXPR, convert (rntype, rr_mask),
4049 size_int (xrr_bitpos), 0);
4051 /* Make a mask that corresponds to both fields being compared.
4052 Do this for both items being compared. If the operands are the
4053 same size and the bits being compared are in the same position
4054 then we can do this by masking both and comparing the masked
4056 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
4057 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
4058 if (lnbitsize == rnbitsize && xll_bitpos == xlr_bitpos)
4060 lhs = make_bit_field_ref (ll_inner, lntype, lnbitsize, lnbitpos,
4061 ll_unsignedp || rl_unsignedp);
4062 if (! all_ones_mask_p (ll_mask, lnbitsize))
4063 lhs = build (BIT_AND_EXPR, lntype, lhs, ll_mask);
4065 rhs = make_bit_field_ref (lr_inner, rntype, rnbitsize, rnbitpos,
4066 lr_unsignedp || rr_unsignedp);
4067 if (! all_ones_mask_p (lr_mask, rnbitsize))
4068 rhs = build (BIT_AND_EXPR, rntype, rhs, lr_mask);
4070 return build (wanted_code, truth_type, lhs, rhs);
4073 /* There is still another way we can do something: If both pairs of
4074 fields being compared are adjacent, we may be able to make a wider
4075 field containing them both.
4077 Note that we still must mask the lhs/rhs expressions. Furthermore,
4078 the mask must be shifted to account for the shift done by
4079 make_bit_field_ref. */
4080 if ((ll_bitsize + ll_bitpos == rl_bitpos
4081 && lr_bitsize + lr_bitpos == rr_bitpos)
4082 || (ll_bitpos == rl_bitpos + rl_bitsize
4083 && lr_bitpos == rr_bitpos + rr_bitsize))
4087 lhs = make_bit_field_ref (ll_inner, lntype, ll_bitsize + rl_bitsize,
4088 MIN (ll_bitpos, rl_bitpos), ll_unsignedp);
4089 rhs = make_bit_field_ref (lr_inner, rntype, lr_bitsize + rr_bitsize,
4090 MIN (lr_bitpos, rr_bitpos), lr_unsignedp);
4092 ll_mask = const_binop (RSHIFT_EXPR, ll_mask,
4093 size_int (MIN (xll_bitpos, xrl_bitpos)), 0);
4094 lr_mask = const_binop (RSHIFT_EXPR, lr_mask,
4095 size_int (MIN (xlr_bitpos, xrr_bitpos)), 0);
4097 /* Convert to the smaller type before masking out unwanted bits. */
/* NOTE(review): the declaration of `type' used by the BIT_AND_EXPR
   builds below is on a line elided from this listing (presumably set
   to the smaller of lntype/rntype in the elided branches) — confirm
   against the full source. */
4099 if (lntype != rntype)
4101 if (lnbitsize > rnbitsize)
4103 lhs = convert (rntype, lhs);
4104 ll_mask = convert (rntype, ll_mask);
4107 else if (lnbitsize < rnbitsize)
4109 rhs = convert (lntype, rhs);
4110 lr_mask = convert (lntype, lr_mask);
4115 if (! all_ones_mask_p (ll_mask, ll_bitsize + rl_bitsize))
4116 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
4118 if (! all_ones_mask_p (lr_mask, lr_bitsize + rr_bitsize))
4119 rhs = build (BIT_AND_EXPR, type, rhs, lr_mask);
4121 return build (wanted_code, truth_type, lhs, rhs);
4127 /* Handle the case of comparisons with constants. If there is something in
4128 common between the masks, those bits of the constants must be the same.
4129 If not, the condition is always false. Test for this to avoid generating
4130 incorrect code below. */
4131 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
4132 if (! integer_zerop (result)
4133 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
4134 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
4136 if (wanted_code == NE_EXPR)
4138 warning ("`or' of unmatched not-equal tests is always 1");
4139 return convert (truth_type, integer_one_node);
4143 warning ("`and' of mutually exclusive equal-tests is always 0");
4144 return convert (truth_type, integer_zero_node);
4148 /* Construct the expression we will return. First get the component
4149 reference we will make. Unless the mask is all ones the width of
4150 that field, perform the mask operation. Then compare with the
4152 result = make_bit_field_ref (ll_inner, lntype, lnbitsize, lnbitpos,
4153 ll_unsignedp || rl_unsignedp);
4155 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
4156 if (! all_ones_mask_p (ll_mask, lnbitsize))
4157 result = build (BIT_AND_EXPR, lntype, result, ll_mask);
4159 return build (wanted_code, truth_type, result,
4160 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
4163 /* Optimize T, which is a comparison of a MIN_EXPR or MAX_EXPR with a
/* The rest of this header comment (lines 4164-4166) is elided in this
   listing, along with the function's `static tree' return type. */
4167 optimize_minmax_comparison (t)
/* The parameter declaration (`tree t;'), opening brace, and the
   declarations of `inner' and `minmax_const' used below are elided. */
4170 tree type = TREE_TYPE (t);
4171 tree arg0 = TREE_OPERAND (t, 0);
4172 enum tree_code op_code;
4173 tree comp_const = TREE_OPERAND (t, 1);
4175 int consts_equal, consts_lt;
4178 STRIP_SIGN_NOPS (arg0);
4180 op_code = TREE_CODE (arg0);
4181 minmax_const = TREE_OPERAND (arg0, 1);
4182 consts_equal = tree_int_cst_equal (minmax_const, comp_const);
4183 consts_lt = tree_int_cst_lt (minmax_const, comp_const);
4184 inner = TREE_OPERAND (arg0, 0);
4186 /* If something does not permit us to optimize, return the original tree. */
4187 if ((op_code != MIN_EXPR && op_code != MAX_EXPR)
4188 || TREE_CODE (comp_const) != INTEGER_CST
4189 || TREE_CONSTANT_OVERFLOW (comp_const)
4190 || TREE_CODE (minmax_const) != INTEGER_CST
4191 || TREE_CONSTANT_OVERFLOW (minmax_const))
4194 /* Now handle all the various comparison codes. We only handle EQ_EXPR
4195 and GT_EXPR, doing the rest with recursive calls using logical
4197 switch (TREE_CODE (t))
/* NE/LT/LE are handled by inverting, recursing on the inverted
   comparison, and inverting back. */
4199 case NE_EXPR: case LT_EXPR: case LE_EXPR:
4201 invert_truthvalue (optimize_minmax_comparison (invert_truthvalue (t)));
/* The GE_EXPR case label (elided here) rewrites >= as (== || >) and
   recurses on each half. */
4205 fold (build (TRUTH_ORIF_EXPR, type,
4206 optimize_minmax_comparison
4207 (build (EQ_EXPR, type, arg0, comp_const)),
4208 optimize_minmax_comparison
4209 (build (GT_EXPR, type, arg0, comp_const))));
/* The EQ_EXPR case label is elided; the comments below use the
   MIN/MAX-against-0 pattern purely as a worked example. */
4212 if (op_code == MAX_EXPR && consts_equal)
4213 /* MAX (X, 0) == 0 -> X <= 0 */
4214 return fold (build (LE_EXPR, type, inner, comp_const));
4216 else if (op_code == MAX_EXPR && consts_lt)
4217 /* MAX (X, 0) == 5 -> X == 5 */
4218 return fold (build (EQ_EXPR, type, inner, comp_const));
4220 else if (op_code == MAX_EXPR)
4221 /* MAX (X, 0) == -1 -> false */
4222 return omit_one_operand (type, integer_zero_node, inner);
4224 else if (consts_equal)
4225 /* MIN (X, 0) == 0 -> X >= 0 */
4226 return fold (build (GE_EXPR, type, inner, comp_const));
4229 /* MIN (X, 0) == 5 -> false */
4230 return omit_one_operand (type, integer_zero_node, inner);
4233 /* MIN (X, 0) == -1 -> X == -1 */
4234 return fold (build (EQ_EXPR, type, inner, comp_const));
/* The GT_EXPR case label is elided here. */
4237 if (op_code == MAX_EXPR && (consts_equal || consts_lt))
4238 /* MAX (X, 0) > 0 -> X > 0
4239 MAX (X, 0) > 5 -> X > 5 */
4240 return fold (build (GT_EXPR, type, inner, comp_const));
4242 else if (op_code == MAX_EXPR)
4243 /* MAX (X, 0) > -1 -> true */
4244 return omit_one_operand (type, integer_one_node, inner);
4246 else if (op_code == MIN_EXPR && (consts_equal || consts_lt))
4247 /* MIN (X, 0) > 0 -> false
4248 MIN (X, 0) > 5 -> false */
4249 return omit_one_operand (type, integer_zero_node, inner);
4252 /* MIN (X, 0) > -1 -> X > -1 */
4253 return fold (build (GT_EXPR, type, inner, comp_const));
4260 /* T is an integer expression that is being multiplied, divided, or taken a
4261 modulus (CODE says which and what kind of divide or modulus) by a
4262 constant C. See if we can eliminate that operation by folding it with
4263 other operations already in T. WIDE_TYPE, if non-null, is a type that
4264 should be used for the computation if wider than our type.
4266 For example, if we are dividing (X * 8) + (Y + 16) by 4, we can return
4267 (X * 2) + (Y + 4). We also canonicalize (X + 7) * 4 into X * 4 + 28
4268 in the hope that either the machine has a multiply-accumulate insn
4269 or that this is part of an addressing calculation.
4271 If we return a non-null expression, it is an equivalent form of the
4272 original computation, but need not be in the original type. */
/* NOTE(review): this listing elides the `static tree' return type,
   some parameter declarations, the opening brace, the declarations of
   t1/t2, and the `switch (tcode)' head together with several case
   labels (INTEGER_CST, SAVE_EXPR, MULT_EXPR) — visible as gaps in the
   embedded line numbers.  Comments below flag the hidden case labels. */
4275 extract_muldiv (t, c, code, wide_type)
4278 enum tree_code code;
4281 tree type = TREE_TYPE (t);
4282 enum tree_code tcode = TREE_CODE (t);
/* Compute in WIDE_TYPE when it is strictly wider than T's own type;
   otherwise stay in T's type. */
4283 tree ctype = (wide_type != 0 && (GET_MODE_SIZE (TYPE_MODE (wide_type))
4284 > GET_MODE_SIZE (TYPE_MODE (type)))
4285 ? wide_type : type);
4287 int same_p = tcode == code;
4288 tree op0 = NULL_TREE, op1 = NULL_TREE;
4290 /* Don't deal with constants of zero here; they confuse the code below. */
4291 if (integer_zerop (c))
4294 if (TREE_CODE_CLASS (tcode) == '1')
4295 op0 = TREE_OPERAND (t, 0);
4297 if (TREE_CODE_CLASS (tcode) == '2')
4298 op0 = TREE_OPERAND (t, 0), op1 = TREE_OPERAND (t, 1);
4300 /* Note that we need not handle conditional operations here since fold
4301 already handles those cases. So just do arithmetic here. */
/* The INTEGER_CST case label for the following group is elided.  */
4305 /* For a constant, we can always simplify if we are a multiply
4306 or (for divide and modulus) if it is a multiple of our constant. */
4307 if (code == MULT_EXPR
4308 || integer_zerop (const_binop (TRUNC_MOD_EXPR, t, c, 0)))
4309 return const_binop (code, convert (ctype, t), convert (ctype, c), 0);
4312 case CONVERT_EXPR: case NON_LVALUE_EXPR: case NOP_EXPR:
4314 /* Pass the constant down and see if we can make a simplification. If
4315 we can, replace this expression with the inner simplification for
4316 possible later conversion to our or some other type. */
4317 if (0 != (t1 = extract_muldiv (op0, convert (TREE_TYPE (op0), c), code,
4318 code == MULT_EXPR ? ctype : NULL_TREE)))
4322 case NEGATE_EXPR: case ABS_EXPR:
4323 if ((t1 = extract_muldiv (op0, c, code, wide_type)) != 0)
4324 return fold (build1 (tcode, ctype, convert (ctype, t1)));
4327 case MIN_EXPR: case MAX_EXPR:
4328 /* MIN (a, b) / 5 -> MIN (a / 5, b / 5) */
4329 if ((t1 = extract_muldiv (op0, c, code, wide_type)) != 0
4330 && (t2 = extract_muldiv (op1, c, code, wide_type)) != 0)
/* Multiplying/dividing by a negative constant reverses the ordering,
   so MIN becomes MAX and vice versa. */
4332 if (tree_int_cst_sgn (c) < 0)
4333 tcode = (tcode == MIN_EXPR ? MAX_EXPR : MIN_EXPR);
4335 return fold (build (tcode, ctype, convert (ctype, t1),
4336 convert (ctype, t2)));
4340 case WITH_RECORD_EXPR:
4341 if ((t1 = extract_muldiv (TREE_OPERAND (t, 0), c, code, wide_type)) != 0)
4342 return build (WITH_RECORD_EXPR, TREE_TYPE (t1), t1,
4343 TREE_OPERAND (t, 1));
/* The SAVE_EXPR case label for the following group is elided.  */
4347 /* If this has not been evaluated and the operand has no side effects,
4348 we can see if we can do something inside it and make a new one.
4349 Note that this test is overly conservative since we can do this
4350 if the only reason it had side effects is that it was another
4351 similar SAVE_EXPR, but that isn't worth bothering with. */
4352 if (SAVE_EXPR_RTL (t) == 0 && ! TREE_SIDE_EFFECTS (TREE_OPERAND (t, 0))
4353 && 0 != (t1 = extract_muldiv (TREE_OPERAND (t, 0), c, code,
4355 return save_expr (t1);
4358 case LSHIFT_EXPR: case RSHIFT_EXPR:
4359 /* If the second operand is constant, this is a multiplication
4360 or floor division, by a power of two, so we can treat it that
4361 way unless the multiplier or divisor overflows. */
4362 if (TREE_CODE (op1) == INTEGER_CST
4363 && 0 != (t1 = convert (ctype,
4364 const_binop (LSHIFT_EXPR, size_one_node,
4366 && ! TREE_OVERFLOW (t1))
4367 return extract_muldiv (build (tcode == LSHIFT_EXPR
4368 ? MULT_EXPR : FLOOR_DIV_EXPR,
4369 ctype, convert (ctype, op0), t1),
4370 c, code, wide_type);
4373 case PLUS_EXPR: case MINUS_EXPR:
4374 /* See if we can eliminate the operation on both sides. If we can, we
4375 can return a new PLUS or MINUS. If we can't, the only remaining
4376 cases where we can do anything are if the second operand is a
4378 t1 = extract_muldiv (op0, c, code, wide_type);
4379 t2 = extract_muldiv (op1, c, code, wide_type);
4380 if (t1 != 0 && t2 != 0)
4381 return fold (build (tcode, ctype, convert (ctype, t1),
4382 convert (ctype, t2)));
4384 /* If this was a subtraction, negate OP1 and set it to be an addition.
4385 This simplifies the logic below. */
4386 if (tcode == MINUS_EXPR)
4387 tcode = PLUS_EXPR, op1 = negate_expr (op1);
4389 if (TREE_CODE (op1) != INTEGER_CST)
4392 /* If either OP1 or C are negative, this optimization is not safe for
4393 some of the division and remainder types while for others we need
4394 to change the code. */
4395 if (tree_int_cst_sgn (op1) < 0 || tree_int_cst_sgn (c) < 0)
4397 if (code == CEIL_DIV_EXPR)
4398 code = FLOOR_DIV_EXPR;
4399 else if (code == CEIL_MOD_EXPR)
4400 code = FLOOR_MOD_EXPR;
4401 else if (code == FLOOR_DIV_EXPR)
4402 code = CEIL_DIV_EXPR;
4403 else if (code == FLOOR_MOD_EXPR)
4404 code = CEIL_MOD_EXPR;
4405 else if (code != MULT_EXPR)
4409 /* Now do the operation and verify it doesn't overflow. */
4410 op1 = const_binop (code, convert (ctype, op1), convert (ctype, c), 0);
4411 if (op1 == 0 || TREE_OVERFLOW (op1))
4414 /* If we were able to eliminate our operation from the first side,
4415 apply our operation to the second side and reform the PLUS. */
4416 if (t1 != 0 && (TREE_CODE (t1) != code || code == MULT_EXPR))
4417 return fold (build (tcode, ctype, convert (ctype, t1), op1));
4419 /* The last case is if we are a multiply. In that case, we can
4420 apply the distributive law to commute the multiply and addition
4421 if the multiplication of the constants doesn't overflow. */
4422 if (code == MULT_EXPR)
4423 return fold (build (tcode, ctype, fold (build (code, ctype,
4424 convert (ctype, op0),
4425 convert (ctype, c))),
/* The MULT_EXPR case label (before the special mod-of-multiple check
   below) is elided from this listing. */
4431 /* We have a special case here if we are doing something like
4432 (C * 8) % 4 since we know that's zero. */
4433 if ((code == TRUNC_MOD_EXPR || code == CEIL_MOD_EXPR
4434 || code == FLOOR_MOD_EXPR || code == ROUND_MOD_EXPR)
4435 && TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
4436 && integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
4437 return omit_one_operand (type, integer_zero_node, op0);
4439 /* ... fall through ... */
4441 case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR:
4442 case ROUND_DIV_EXPR: case EXACT_DIV_EXPR:
4443 /* If we can extract our operation from the LHS, do so and return a
4444 new operation. Likewise for the RHS from a MULT_EXPR. Otherwise,
4445 do something only if the second operand is a constant. */
/* The `if (same_p' head of this condition is elided here.  */
4447 && (t1 = extract_muldiv (op0, c, code, wide_type)) != 0)
4448 return fold (build (tcode, ctype, convert (ctype, t1),
4449 convert (ctype, op1)));
4450 else if (tcode == MULT_EXPR && code == MULT_EXPR
4451 && (t1 = extract_muldiv (op1, c, code, wide_type)) != 0)
4452 return fold (build (tcode, ctype, convert (ctype, op0),
4453 convert (ctype, t1)));
4454 else if (TREE_CODE (op1) != INTEGER_CST)
4457 /* If these are the same operation types, we can associate them
4458 assuming no overflow. */
4460 && 0 != (t1 = const_binop (MULT_EXPR, convert (ctype, op1),
4461 convert (ctype, c), 0))
4462 && ! TREE_OVERFLOW (t1))
4463 return fold (build (tcode, ctype, convert (ctype, op0), t1));
4465 /* If these operations "cancel" each other, we have the main
4466 optimizations of this pass, which occur when either constant is a
4467 multiple of the other, in which case we replace this with either an
4468 operation or CODE or TCODE. */
4469 if ((code == MULT_EXPR && tcode == EXACT_DIV_EXPR)
4470 || (tcode == MULT_EXPR
4471 && code != TRUNC_MOD_EXPR && code != CEIL_MOD_EXPR
4472 && code != FLOOR_MOD_EXPR && code != ROUND_MOD_EXPR))
4474 if (integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
4475 return fold (build (tcode, ctype, convert (ctype, op0),
4477 const_binop (TRUNC_DIV_EXPR,
4479 else if (integer_zerop (const_binop (TRUNC_MOD_EXPR, c, op1, 0)))
4480 return fold (build (code, ctype, convert (ctype, op0),
4482 const_binop (TRUNC_DIV_EXPR,
4494 /* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
4495 S, a SAVE_EXPR, return the expression actually being evaluated. Note
4496 that we may sometimes modify the tree. */
/* NOTE(review): the `static tree' return type, the parameter
   declarations (`tree t; tree s;'), braces, and the final
   `return t;' are elided from this listing. */
4499 strip_compound_expr (t, s)
4503 enum tree_code code = TREE_CODE (t);
4505 /* See if this is the COMPOUND_EXPR we want to eliminate. */
4506 if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
4507 && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
4508 return TREE_OPERAND (t, 1);
4510 /* See if this is a COND_EXPR or a simple arithmetic operator. We
4511 don't bother handling any other types. */
4512 else if (code == COND_EXPR)
/* Recurse into all three arms, rewriting operands in place.  */
4514 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
4515 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
4516 TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
4518 else if (TREE_CODE_CLASS (code) == '1')
4519 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
4520 else if (TREE_CODE_CLASS (code) == '<'
4521 || TREE_CODE_CLASS (code) == '2')
4523 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
4524 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
4530 /* Return a node which has the indicated constant VALUE (either 0 or
4531 1), and is of the indicated TYPE. */
/* NOTE(review): the return type, parameter declarations (`int value;
   tree type;'), braces, and the trailing `return t;' of the final
   branch are elided from this listing. */
4534 constant_boolean_node (value, type)
4538 if (type == integer_type_node)
4539 return value ? integer_one_node : integer_zero_node;
4540 else if (TREE_CODE (type) == BOOLEAN_TYPE)
/* The second argument of this call (integer_zero_node) is on an
   elided line. */
4541 return truthvalue_conversion (value ? integer_one_node :
/* Fallback: build a fresh INTEGER_CST of the requested type.  */
4545 tree t = build_int_2 (value, 0);
4547 TREE_TYPE (t) = type;
4552 /* Utility function for the following routine, to see how complex a nesting of
4553 COND_EXPRs can be. EXPR is the expression and LIMIT is a count beyond which
4554 we don't care (to avoid spending too much time on complex expressions.). */
/* NOTE(review): the return type, parameter declarations, local
   declarations, and the `return 0;' early exits are elided from this
   listing.  The elided locals appear to be named `true' and `false'
   (used below) — legal in C89 but they collide with <stdbool.h> macros
   and the C23 keywords; confirm against the full source before
   compiling with a modern standard. */
4557 count_cond (expr, lim)
4563 if (TREE_CODE (expr) != COND_EXPR)
/* Count the then-arm first, then give the else-arm whatever budget
   remains, so the recursion cannot exceed LIM total work. */
4568 true = count_cond (TREE_OPERAND (expr, 1), lim - 1);
4569 false = count_cond (TREE_OPERAND (expr, 2), lim - 1 - true);
4570 return MIN (lim, 1 + true + false);
4573 /* Perform constant folding and related simplification of EXPR.
4574 The related simplifications include x*1 => x, x*0 => 0, etc.,
4575 and application of the associative law.
4576 NOP_EXPR conversions may be removed freely (as long as we
4577 are careful not to change the C type of the overall expression)
4578 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
4579 but we can constant-fold them if they have constant operands. */
4585 register tree t = expr;
4586 tree t1 = NULL_TREE;
4588 tree type = TREE_TYPE (expr);
4589 register tree arg0 = NULL_TREE, arg1 = NULL_TREE;
4590 register enum tree_code code = TREE_CODE (t);
4593 /* WINS will be nonzero when the switch is done
4594 if all operands are constant. */
4597 /* Don't try to process an RTL_EXPR since its operands aren't trees.
4598 Likewise for a SAVE_EXPR that's already been evaluated. */
4599 if (code == RTL_EXPR || (code == SAVE_EXPR && SAVE_EXPR_RTL (t)) != 0)
4602 /* Return right away if already constant. */
4603 if (TREE_CONSTANT (t))
4605 if (code == CONST_DECL)
4606 return DECL_INITIAL (t);
4610 #ifdef MAX_INTEGER_COMPUTATION_MODE
4611 check_max_integer_computation_mode (expr);
4614 kind = TREE_CODE_CLASS (code);
4615 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
4619 /* Special case for conversion ops that can have fixed point args. */
4620 arg0 = TREE_OPERAND (t, 0);
4622 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
4624 STRIP_SIGN_NOPS (arg0);
4626 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
4627 subop = TREE_REALPART (arg0);
4631 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
4632 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4633 && TREE_CODE (subop) != REAL_CST
4634 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
4636 /* Note that TREE_CONSTANT isn't enough:
4637 static var addresses are constant but we can't
4638 do arithmetic on them. */
4641 else if (kind == 'e' || kind == '<'
4642 || kind == '1' || kind == '2' || kind == 'r')
4644 register int len = tree_code_length[(int) code];
4646 for (i = 0; i < len; i++)
4648 tree op = TREE_OPERAND (t, i);
4652 continue; /* Valid for CALL_EXPR, at least. */
4654 if (kind == '<' || code == RSHIFT_EXPR)
4656 /* Signedness matters here. Perhaps we can refine this
4658 STRIP_SIGN_NOPS (op);
4662 /* Strip any conversions that don't change the mode. */
4666 if (TREE_CODE (op) == COMPLEX_CST)
4667 subop = TREE_REALPART (op);
4671 if (TREE_CODE (subop) != INTEGER_CST
4672 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4673 && TREE_CODE (subop) != REAL_CST
4674 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
4676 /* Note that TREE_CONSTANT isn't enough:
4677 static var addresses are constant but we can't
4678 do arithmetic on them. */
4688 /* If this is a commutative operation, and ARG0 is a constant, move it
4689 to ARG1 to reduce the number of tests below. */
4690 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
4691 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
4692 || code == BIT_AND_EXPR)
4693 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
4695 tem = arg0; arg0 = arg1; arg1 = tem;
4697 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
4698 TREE_OPERAND (t, 1) = tem;
4701 /* Now WINS is set as described above,
4702 ARG0 is the first operand of EXPR,
4703 and ARG1 is the second operand (if it has more than one operand).
4705 First check for cases where an arithmetic operation is applied to a
4706 compound, conditional, or comparison operation. Push the arithmetic
4707 operation inside the compound or conditional to see if any folding
4708 can then be done. Convert comparison to conditional for this purpose.
4709 The also optimizes non-constant cases that used to be done in
4712 Before we do that, see if this is a BIT_AND_EXPR or a BIT_OR_EXPR,
4713 one of the operands is a comparison and the other is a comparison, a
4714 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
4715 code below would make the expression more complex. Change it to a
4716 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
4717 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
4719 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
4720 || code == EQ_EXPR || code == NE_EXPR)
4721 && ((truth_value_p (TREE_CODE (arg0))
4722 && (truth_value_p (TREE_CODE (arg1))
4723 || (TREE_CODE (arg1) == BIT_AND_EXPR
4724 && integer_onep (TREE_OPERAND (arg1, 1)))))
4725 || (truth_value_p (TREE_CODE (arg1))
4726 && (truth_value_p (TREE_CODE (arg0))
4727 || (TREE_CODE (arg0) == BIT_AND_EXPR
4728 && integer_onep (TREE_OPERAND (arg0, 1)))))))
4730 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
4731 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
4735 if (code == EQ_EXPR)
4736 t = invert_truthvalue (t);
4741 if (TREE_CODE_CLASS (code) == '1')
4743 if (TREE_CODE (arg0) == COMPOUND_EXPR)
4744 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
4745 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
4746 else if (TREE_CODE (arg0) == COND_EXPR)
4748 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
4749 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
4750 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
4752 /* If this was a conversion, and all we did was to move into
4753 inside the COND_EXPR, bring it back out. But leave it if
4754 it is a conversion from integer to integer and the
4755 result precision is no wider than a word since such a
4756 conversion is cheap and may be optimized away by combine,
4757 while it couldn't if it were outside the COND_EXPR. Then return
4758 so we don't get into an infinite recursion loop taking the
4759 conversion out and then back in. */
4761 if ((code == NOP_EXPR || code == CONVERT_EXPR
4762 || code == NON_LVALUE_EXPR)
4763 && TREE_CODE (t) == COND_EXPR
4764 && TREE_CODE (TREE_OPERAND (t, 1)) == code
4765 && TREE_CODE (TREE_OPERAND (t, 2)) == code
4766 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
4767 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0)))
4768 && ! (INTEGRAL_TYPE_P (TREE_TYPE (t))
4770 (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))))
4771 && TYPE_PRECISION (TREE_TYPE (t)) <= BITS_PER_WORD))
4772 t = build1 (code, type,
4774 TREE_TYPE (TREE_OPERAND
4775 (TREE_OPERAND (t, 1), 0)),
4776 TREE_OPERAND (t, 0),
4777 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
4778 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
4781 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
4782 return fold (build (COND_EXPR, type, arg0,
4783 fold (build1 (code, type, integer_one_node)),
4784 fold (build1 (code, type, integer_zero_node))));
4786 else if (TREE_CODE_CLASS (code) == '2'
4787 || TREE_CODE_CLASS (code) == '<')
4789 if (TREE_CODE (arg1) == COMPOUND_EXPR)
4790 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
4791 fold (build (code, type,
4792 arg0, TREE_OPERAND (arg1, 1))));
4793 else if ((TREE_CODE (arg1) == COND_EXPR
4794 || (TREE_CODE_CLASS (TREE_CODE (arg1)) == '<'
4795 && TREE_CODE_CLASS (code) != '<'))
4796 && (TREE_CODE (arg0) != COND_EXPR
4797 || count_cond (arg0, 25) + count_cond (arg1, 25) <= 25)
4798 && (! TREE_SIDE_EFFECTS (arg0)
4799 || (global_bindings_p () == 0
4800 && ! contains_placeholder_p (arg0))))
4802 tree test, true_value, false_value;
4803 tree lhs = 0, rhs = 0;
4805 if (TREE_CODE (arg1) == COND_EXPR)
4807 test = TREE_OPERAND (arg1, 0);
4808 true_value = TREE_OPERAND (arg1, 1);
4809 false_value = TREE_OPERAND (arg1, 2);
4813 tree testtype = TREE_TYPE (arg1);
4815 true_value = convert (testtype, integer_one_node);
4816 false_value = convert (testtype, integer_zero_node);
4819 /* If ARG0 is complex we want to make sure we only evaluate
4820 it once. Though this is only required if it is volatile, it
4821 might be more efficient even if it is not. However, if we
4822 succeed in folding one part to a constant, we do not need
4823 to make this SAVE_EXPR. Since we do this optimization
4824 primarily to see if we do end up with constant and this
4825 SAVE_EXPR interferes with later optimizations, suppressing
4826 it when we can is important.
4828 If we are not in a function, we can't make a SAVE_EXPR, so don't
4829 try to do so. Don't try to see if the result is a constant
4830 if an arm is a COND_EXPR since we get exponential behavior
4833 if (TREE_CODE (arg0) != SAVE_EXPR && ! TREE_CONSTANT (arg0)
4834 && global_bindings_p () == 0
4835 && ((TREE_CODE (arg0) != VAR_DECL
4836 && TREE_CODE (arg0) != PARM_DECL)
4837 || TREE_SIDE_EFFECTS (arg0)))
4839 if (TREE_CODE (true_value) != COND_EXPR)
4840 lhs = fold (build (code, type, arg0, true_value));
4842 if (TREE_CODE (false_value) != COND_EXPR)
4843 rhs = fold (build (code, type, arg0, false_value));
4845 if ((lhs == 0 || ! TREE_CONSTANT (lhs))
4846 && (rhs == 0 || !TREE_CONSTANT (rhs)))
4847 arg0 = save_expr (arg0), lhs = rhs = 0;
4851 lhs = fold (build (code, type, arg0, true_value));
4853 rhs = fold (build (code, type, arg0, false_value));
4855 test = fold (build (COND_EXPR, type, test, lhs, rhs));
4857 if (TREE_CODE (arg0) == SAVE_EXPR)
4858 return build (COMPOUND_EXPR, type,
4859 convert (void_type_node, arg0),
4860 strip_compound_expr (test, arg0));
4862 return convert (type, test);
4865 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
4866 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
4867 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
4868 else if ((TREE_CODE (arg0) == COND_EXPR
4869 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4870 && TREE_CODE_CLASS (code) != '<'))
4871 && (TREE_CODE (arg1) != COND_EXPR
4872 || count_cond (arg0, 25) + count_cond (arg1, 25) <= 25)
4873 && (! TREE_SIDE_EFFECTS (arg1)
4874 || (global_bindings_p () == 0
4875 && ! contains_placeholder_p (arg1))))
4877 tree test, true_value, false_value;
4878 tree lhs = 0, rhs = 0;
4880 if (TREE_CODE (arg0) == COND_EXPR)
4882 test = TREE_OPERAND (arg0, 0);
4883 true_value = TREE_OPERAND (arg0, 1);
4884 false_value = TREE_OPERAND (arg0, 2);
4888 tree testtype = TREE_TYPE (arg0);
4890 true_value = convert (testtype, integer_one_node);
4891 false_value = convert (testtype, integer_zero_node);
4894 if (TREE_CODE (arg1) != SAVE_EXPR && ! TREE_CONSTANT (arg0)
4895 && global_bindings_p () == 0
4896 && ((TREE_CODE (arg1) != VAR_DECL
4897 && TREE_CODE (arg1) != PARM_DECL)
4898 || TREE_SIDE_EFFECTS (arg1)))
4900 if (TREE_CODE (true_value) != COND_EXPR)
4901 lhs = fold (build (code, type, true_value, arg1));
4903 if (TREE_CODE (false_value) != COND_EXPR)
4904 rhs = fold (build (code, type, false_value, arg1));
4906 if ((lhs == 0 || ! TREE_CONSTANT (lhs))
4907 && (rhs == 0 || !TREE_CONSTANT (rhs)))
4908 arg1 = save_expr (arg1), lhs = rhs = 0;
4912 lhs = fold (build (code, type, true_value, arg1));
4915 rhs = fold (build (code, type, false_value, arg1));
4917 test = fold (build (COND_EXPR, type, test, lhs, rhs));
4918 if (TREE_CODE (arg1) == SAVE_EXPR)
4919 return build (COMPOUND_EXPR, type,
4920 convert (void_type_node, arg1),
4921 strip_compound_expr (test, arg1));
4923 return convert (type, test);
4926 else if (TREE_CODE_CLASS (code) == '<'
4927 && TREE_CODE (arg0) == COMPOUND_EXPR)
4928 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
4929 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
4930 else if (TREE_CODE_CLASS (code) == '<'
4931 && TREE_CODE (arg1) == COMPOUND_EXPR)
4932 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
4933 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
4945 return fold (DECL_INITIAL (t));
4950 case FIX_TRUNC_EXPR:
4951 /* Other kinds of FIX are not handled properly by fold_convert. */
4953 if (TREE_TYPE (TREE_OPERAND (t, 0)) == TREE_TYPE (t))
4954 return TREE_OPERAND (t, 0);
4956 /* Handle cases of two conversions in a row. */
4957 if (TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
4958 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
4960 tree inside_type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0));
4961 tree inter_type = TREE_TYPE (TREE_OPERAND (t, 0));
4962 tree final_type = TREE_TYPE (t);
4963 int inside_int = INTEGRAL_TYPE_P (inside_type);
4964 int inside_ptr = POINTER_TYPE_P (inside_type);
4965 int inside_float = FLOAT_TYPE_P (inside_type);
4966 int inside_prec = TYPE_PRECISION (inside_type);
4967 int inside_unsignedp = TREE_UNSIGNED (inside_type);
4968 int inter_int = INTEGRAL_TYPE_P (inter_type);
4969 int inter_ptr = POINTER_TYPE_P (inter_type);
4970 int inter_float = FLOAT_TYPE_P (inter_type);
4971 int inter_prec = TYPE_PRECISION (inter_type);
4972 int inter_unsignedp = TREE_UNSIGNED (inter_type);
4973 int final_int = INTEGRAL_TYPE_P (final_type);
4974 int final_ptr = POINTER_TYPE_P (final_type);
4975 int final_float = FLOAT_TYPE_P (final_type);
4976 int final_prec = TYPE_PRECISION (final_type);
4977 int final_unsignedp = TREE_UNSIGNED (final_type);
4979 /* In addition to the cases of two conversions in a row
4980 handled below, if we are converting something to its own
4981 type via an object of identical or wider precision, neither
4982 conversion is needed. */
4983 if (inside_type == final_type
4984 && ((inter_int && final_int) || (inter_float && final_float))
4985 && inter_prec >= final_prec)
4986 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4988 /* Likewise, if the intermediate and final types are either both
4989 float or both integer, we don't need the middle conversion if
4990 it is wider than the final type and doesn't change the signedness
4991 (for integers). Avoid this if the final type is a pointer
4992 since then we sometimes need the inner conversion. Likewise if
4993 the outer has a precision not equal to the size of its mode. */
4994 if ((((inter_int || inter_ptr) && (inside_int || inside_ptr))
4995 || (inter_float && inside_float))
4996 && inter_prec >= inside_prec
4997 && (inter_float || inter_unsignedp == inside_unsignedp)
4998 && ! (final_prec != GET_MODE_BITSIZE (TYPE_MODE (final_type))
4999 && TYPE_MODE (final_type) == TYPE_MODE (inter_type))
5001 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
5003 /* If we have a sign-extension of a zero-extended value, we can
5004 replace that by a single zero-extension. */
5005 if (inside_int && inter_int && final_int
5006 && inside_prec < inter_prec && inter_prec < final_prec
5007 && inside_unsignedp && !inter_unsignedp)
5008 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
5010 /* Two conversions in a row are not needed unless:
5011 - some conversion is floating-point (overstrict for now), or
5012 - the intermediate type is narrower than both initial and
5014 - the intermediate type and innermost type differ in signedness,
5015 and the outermost type is wider than the intermediate, or
5016 - the initial type is a pointer type and the precisions of the
5017 intermediate and final types differ, or
5018 - the final type is a pointer type and the precisions of the
5019 initial and intermediate types differ. */
5020 if (! inside_float && ! inter_float && ! final_float
5021 && (inter_prec > inside_prec || inter_prec > final_prec)
5022 && ! (inside_int && inter_int
5023 && inter_unsignedp != inside_unsignedp
5024 && inter_prec < final_prec)
5025 && ((inter_unsignedp && inter_prec > inside_prec)
5026 == (final_unsignedp && final_prec > inter_prec))
5027 && ! (inside_ptr && inter_prec != final_prec)
5028 && ! (final_ptr && inside_prec != inter_prec)
5029 && ! (final_prec != GET_MODE_BITSIZE (TYPE_MODE (final_type))
5030 && TYPE_MODE (final_type) == TYPE_MODE (inter_type))
5032 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
5035 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
5036 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
5037 /* Detect assigning a bitfield. */
5038 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
5039 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
5041 /* Don't leave an assignment inside a conversion
5042 unless assigning a bitfield. */
5043 tree prev = TREE_OPERAND (t, 0);
5044 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
5045 /* First do the assignment, then return converted constant. */
5046 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
5052 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
5055 return fold_convert (t, arg0);
5057 #if 0 /* This loses on &"foo"[0]. */
5062 /* Fold an expression like: "foo"[2] */
5063 if (TREE_CODE (arg0) == STRING_CST
5064 && TREE_CODE (arg1) == INTEGER_CST
5065 && !TREE_INT_CST_HIGH (arg1)
5066 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
5068 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
5069 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
5070 force_fit_type (t, 0);
5077 if (TREE_CODE (arg0) == CONSTRUCTOR)
5079 tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0));
5086 TREE_CONSTANT (t) = wins;
5092 if (TREE_CODE (arg0) == INTEGER_CST)
5094 HOST_WIDE_INT low, high;
5095 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
5096 TREE_INT_CST_HIGH (arg0),
5098 t = build_int_2 (low, high);
5099 TREE_TYPE (t) = type;
5101 = (TREE_OVERFLOW (arg0)
5102 | force_fit_type (t, overflow && !TREE_UNSIGNED (type)));
5103 TREE_CONSTANT_OVERFLOW (t)
5104 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
5106 else if (TREE_CODE (arg0) == REAL_CST)
5107 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
5109 else if (TREE_CODE (arg0) == NEGATE_EXPR)
5110 return TREE_OPERAND (arg0, 0);
5112 /* Convert - (a - b) to (b - a) for non-floating-point. */
5113 else if (TREE_CODE (arg0) == MINUS_EXPR
5114 && (! FLOAT_TYPE_P (type) || flag_fast_math))
5115 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
5116 TREE_OPERAND (arg0, 0));
5123 if (TREE_CODE (arg0) == INTEGER_CST)
5125 if (! TREE_UNSIGNED (type)
5126 && TREE_INT_CST_HIGH (arg0) < 0)
5128 HOST_WIDE_INT low, high;
5129 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
5130 TREE_INT_CST_HIGH (arg0),
5132 t = build_int_2 (low, high);
5133 TREE_TYPE (t) = type;
5135 = (TREE_OVERFLOW (arg0)
5136 | force_fit_type (t, overflow));
5137 TREE_CONSTANT_OVERFLOW (t)
5138 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
5141 else if (TREE_CODE (arg0) == REAL_CST)
5143 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
5144 t = build_real (type,
5145 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
5148 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
5149 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
5153 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
5155 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
5156 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
5157 TREE_OPERAND (arg0, 0),
5158 negate_expr (TREE_OPERAND (arg0, 1)));
5159 else if (TREE_CODE (arg0) == COMPLEX_CST)
5160 return build_complex (type, TREE_OPERAND (arg0, 0),
5161 negate_expr (TREE_OPERAND (arg0, 1)));
5162 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
5163 return fold (build (TREE_CODE (arg0), type,
5164 fold (build1 (CONJ_EXPR, type,
5165 TREE_OPERAND (arg0, 0))),
5166 fold (build1 (CONJ_EXPR,
5167 type, TREE_OPERAND (arg0, 1)))));
5168 else if (TREE_CODE (arg0) == CONJ_EXPR)
5169 return TREE_OPERAND (arg0, 0);
5175 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
5176 ~ TREE_INT_CST_HIGH (arg0));
5177 TREE_TYPE (t) = type;
5178 force_fit_type (t, 0);
5179 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
5180 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
5182 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
5183 return TREE_OPERAND (arg0, 0);
5187 /* A + (-B) -> A - B */
5188 if (TREE_CODE (arg1) == NEGATE_EXPR)
5189 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
5190 /* (-A) + B -> B - A */
5191 if (TREE_CODE (arg0) == NEGATE_EXPR)
5192 return fold (build (MINUS_EXPR, type, arg1, TREE_OPERAND (arg0, 0)));
5193 else if (! FLOAT_TYPE_P (type))
5195 if (integer_zerop (arg1))
5196 return non_lvalue (convert (type, arg0));
5198 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
5199 with a constant, and the two constants have no bits in common,
5200 we should treat this as a BIT_IOR_EXPR since this may produce more
5202 if (TREE_CODE (arg0) == BIT_AND_EXPR
5203 && TREE_CODE (arg1) == BIT_AND_EXPR
5204 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
5205 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
5206 && integer_zerop (const_binop (BIT_AND_EXPR,
5207 TREE_OPERAND (arg0, 1),
5208 TREE_OPERAND (arg1, 1), 0)))
5210 code = BIT_IOR_EXPR;
5214 /* Reassociate (plus (plus (mult) (foo)) (mult)) as
5215 (plus (plus (mult) (mult)) (foo)) so that we can
5216 take advantage of the factoring cases below. */
5217 if ((TREE_CODE (arg0) == PLUS_EXPR
5218 && TREE_CODE (arg1) == MULT_EXPR)
5219 || (TREE_CODE (arg1) == PLUS_EXPR
5220 && TREE_CODE (arg0) == MULT_EXPR))
5222 tree parg0, parg1, parg, marg;
5224 if (TREE_CODE (arg0) == PLUS_EXPR)
5225 parg = arg0, marg = arg1;
5227 parg = arg1, marg = arg0;
5228 parg0 = TREE_OPERAND (parg, 0);
5229 parg1 = TREE_OPERAND (parg, 1);
5233 if (TREE_CODE (parg0) == MULT_EXPR
5234 && TREE_CODE (parg1) != MULT_EXPR)
5235 return fold (build (PLUS_EXPR, type,
5236 fold (build (PLUS_EXPR, type, parg0, marg)),
5238 if (TREE_CODE (parg0) != MULT_EXPR
5239 && TREE_CODE (parg1) == MULT_EXPR)
5240 return fold (build (PLUS_EXPR, type,
5241 fold (build (PLUS_EXPR, type, parg1, marg)),
5245 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR)
5247 tree arg00, arg01, arg10, arg11;
5248 tree alt0 = NULL_TREE, alt1 = NULL_TREE, same;
5250 /* (A * C) + (B * C) -> (A+B) * C.
5251 We are most concerned about the case where C is a constant,
5252 but other combinations show up during loop reduction. Since
5253 it is not difficult, try all four possibilities. */
5255 arg00 = TREE_OPERAND (arg0, 0);
5256 arg01 = TREE_OPERAND (arg0, 1);
5257 arg10 = TREE_OPERAND (arg1, 0);
5258 arg11 = TREE_OPERAND (arg1, 1);
5261 if (operand_equal_p (arg01, arg11, 0))
5262 same = arg01, alt0 = arg00, alt1 = arg10;
5263 else if (operand_equal_p (arg00, arg10, 0))
5264 same = arg00, alt0 = arg01, alt1 = arg11;
5265 else if (operand_equal_p (arg00, arg11, 0))
5266 same = arg00, alt0 = arg01, alt1 = arg10;
5267 else if (operand_equal_p (arg01, arg10, 0))
5268 same = arg01, alt0 = arg00, alt1 = arg11;
5270 /* No identical multiplicands; see if we can find a common
5271 power-of-two factor in non-power-of-two multiplies. This
5272 can help in multi-dimensional array access. */
5273 else if (TREE_CODE (arg01) == INTEGER_CST
5274 && TREE_CODE (arg11) == INTEGER_CST
5275 && TREE_INT_CST_HIGH (arg01) == 0
5276 && TREE_INT_CST_HIGH (arg11) == 0)
5278 HOST_WIDE_INT int01, int11, tmp;
5279 int01 = TREE_INT_CST_LOW (arg01);
5280 int11 = TREE_INT_CST_LOW (arg11);
5282 /* Move min of absolute values to int11. */
5283 if ((int01 >= 0 ? int01 : -int01)
5284 < (int11 >= 0 ? int11 : -int11))
5286 tmp = int01, int01 = int11, int11 = tmp;
5287 alt0 = arg00, arg00 = arg10, arg10 = alt0;
5288 alt0 = arg01, arg01 = arg11, arg11 = alt0;
5291 if (exact_log2 (int11) > 0 && int01 % int11 == 0)
5293 alt0 = fold (build (MULT_EXPR, type, arg00,
5294 build_int_2 (int01 / int11, 0)));
5301 return fold (build (MULT_EXPR, type,
5302 fold (build (PLUS_EXPR, type, alt0, alt1)),
5306 /* In IEEE floating point, x+0 may not equal x. */
5307 else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
5309 && real_zerop (arg1))
5310 return non_lvalue (convert (type, arg0));
5311 /* x+(-0) equals x, even for IEEE. */
5312 else if (TREE_CODE (arg1) == REAL_CST
5313 && REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (arg1)))
5314 return non_lvalue (convert (type, arg0));
5317 /* (A << C1) + (A >> C2) if A is unsigned and C1+C2 is the size of A
5318 is a rotate of A by C1 bits. */
5319 /* (A << B) + (A >> (Z - B)) if A is unsigned and Z is the size of A
5320 is a rotate of A by B bits. */
5322 register enum tree_code code0, code1;
5323 code0 = TREE_CODE (arg0);
5324 code1 = TREE_CODE (arg1);
5325 if (((code0 == RSHIFT_EXPR && code1 == LSHIFT_EXPR)
5326 || (code1 == RSHIFT_EXPR && code0 == LSHIFT_EXPR))
5327 && operand_equal_p (TREE_OPERAND (arg0, 0),
5328 TREE_OPERAND (arg1,0), 0)
5329 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
5331 register tree tree01, tree11;
5332 register enum tree_code code01, code11;
5334 tree01 = TREE_OPERAND (arg0, 1);
5335 tree11 = TREE_OPERAND (arg1, 1);
5336 STRIP_NOPS (tree01);
5337 STRIP_NOPS (tree11);
5338 code01 = TREE_CODE (tree01);
5339 code11 = TREE_CODE (tree11);
5340 if (code01 == INTEGER_CST
5341 && code11 == INTEGER_CST
5342 && TREE_INT_CST_HIGH (tree01) == 0
5343 && TREE_INT_CST_HIGH (tree11) == 0
5344 && ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11))
5345 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
5346 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
5347 code0 == LSHIFT_EXPR ? tree01 : tree11);
5348 else if (code11 == MINUS_EXPR)
5350 tree tree110, tree111;
5351 tree110 = TREE_OPERAND (tree11, 0);
5352 tree111 = TREE_OPERAND (tree11, 1);
5353 STRIP_NOPS (tree110);
5354 STRIP_NOPS (tree111);
5355 if (TREE_CODE (tree110) == INTEGER_CST
5356 && TREE_INT_CST_HIGH (tree110) == 0
5357 && (TREE_INT_CST_LOW (tree110)
5358 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0))))
5359 && operand_equal_p (tree01, tree111, 0))
5360 return build ((code0 == LSHIFT_EXPR
5363 type, TREE_OPERAND (arg0, 0), tree01);
5365 else if (code01 == MINUS_EXPR)
5367 tree tree010, tree011;
5368 tree010 = TREE_OPERAND (tree01, 0);
5369 tree011 = TREE_OPERAND (tree01, 1);
5370 STRIP_NOPS (tree010);
5371 STRIP_NOPS (tree011);
5372 if (TREE_CODE (tree010) == INTEGER_CST
5373 && TREE_INT_CST_HIGH (tree010) == 0
5374 && (TREE_INT_CST_LOW (tree010)
5375 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0))))
5376 && operand_equal_p (tree11, tree011, 0))
5377 return build ((code0 != LSHIFT_EXPR
5380 type, TREE_OPERAND (arg0, 0), tree11);
5387 /* In most languages, can't associate operations on floats through
5388 parentheses. Rather than remember where the parentheses were, we
5389 don't associate floats at all. It shouldn't matter much. However,
5390 associating multiplications is only very slightly inaccurate, so do
5391 that if -ffast-math is specified. */
5394 && (! FLOAT_TYPE_P (type)
5395 || (flag_fast_math && code != MULT_EXPR)))
5397 tree var0, con0, lit0, var1, con1, lit1;
5399 /* Split both trees into variables, constants, and literals. Then
5400 associate each group together, the constants with literals,
5401 then the result with variables. This increases the chances of
5402 literals being recombined later and of generating relocatable
5403 expressions for the sum of a constant and literal. */
5404 var0 = split_tree (arg0, code, &con0, &lit0, 0);
5405 var1 = split_tree (arg1, code, &con1, &lit1, code == MINUS_EXPR);
5407 /* Only do something if we found more than two objects. Otherwise,
5408 nothing has changed and we risk infinite recursion. */
5409 if (2 < ((var0 != 0) + (var1 != 0) + (con0 != 0) + (con1 != 0)
5410 + (lit0 != 0) + (lit1 != 0)))
5412 var0 = associate_trees (var0, var1, code, type);
5413 con0 = associate_trees (con0, con1, code, type);
5414 lit0 = associate_trees (lit0, lit1, code, type);
5415 con0 = associate_trees (con0, lit0, code, type);
5416 return convert (type, associate_trees (var0, con0, code, type));
5421 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
5422 if (TREE_CODE (arg1) == REAL_CST)
5424 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
5426 t1 = const_binop (code, arg0, arg1, 0);
5427 if (t1 != NULL_TREE)
5429 /* The return value should always have
5430 the same type as the original expression. */
5431 if (TREE_TYPE (t1) != TREE_TYPE (t))
5432 t1 = convert (TREE_TYPE (t), t1);
5439 /* A - (-B) -> A + B */
5440 if (TREE_CODE (arg1) == NEGATE_EXPR)
5441 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
5442 /* (-A) - CST -> (-CST) - A for floating point (what about ints ?) */
5443 if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == REAL_CST)
5445 fold (build (MINUS_EXPR, type,
5446 build_real (TREE_TYPE (arg1),
5447 REAL_VALUE_NEGATE (TREE_REAL_CST (arg1))),
5448 TREE_OPERAND (arg0, 0)));
5450 if (! FLOAT_TYPE_P (type))
5452 if (! wins && integer_zerop (arg0))
5453 return negate_expr (arg1);
5454 if (integer_zerop (arg1))
5455 return non_lvalue (convert (type, arg0));
5457 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
5458 about the case where C is a constant, just try one of the
5459 four possibilities. */
5461 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
5462 && operand_equal_p (TREE_OPERAND (arg0, 1),
5463 TREE_OPERAND (arg1, 1), 0))
5464 return fold (build (MULT_EXPR, type,
5465 fold (build (MINUS_EXPR, type,
5466 TREE_OPERAND (arg0, 0),
5467 TREE_OPERAND (arg1, 0))),
5468 TREE_OPERAND (arg0, 1)));
5471 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
5474 /* Except with IEEE floating point, 0-x equals -x. */
5475 if (! wins && real_zerop (arg0))
5476 return negate_expr (arg1);
5477 /* Except with IEEE floating point, x-0 equals x. */
5478 if (real_zerop (arg1))
5479 return non_lvalue (convert (type, arg0));
5482 /* Fold &x - &x. This can happen from &x.foo - &x.
5483 This is unsafe for certain floats even in non-IEEE formats.
5484 In IEEE, it is unsafe because it does wrong for NaNs.
5485 Also note that operand_equal_p is always false if an operand
5488 if ((! FLOAT_TYPE_P (type) || flag_fast_math)
5489 && operand_equal_p (arg0, arg1, 0))
5490 return convert (type, integer_zero_node);
5495 /* (-A) * (-B) -> A * B */
5496 if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == NEGATE_EXPR)
5497 return fold (build (MULT_EXPR, type, TREE_OPERAND (arg0, 0),
5498 TREE_OPERAND (arg1, 0)));
5500 if (! FLOAT_TYPE_P (type))
5502 if (integer_zerop (arg1))
5503 return omit_one_operand (type, arg1, arg0);
5504 if (integer_onep (arg1))
5505 return non_lvalue (convert (type, arg0));
5507 /* (a * (1 << b)) is (a << b) */
5508 if (TREE_CODE (arg1) == LSHIFT_EXPR
5509 && integer_onep (TREE_OPERAND (arg1, 0)))
5510 return fold (build (LSHIFT_EXPR, type, arg0,
5511 TREE_OPERAND (arg1, 1)));
5512 if (TREE_CODE (arg0) == LSHIFT_EXPR
5513 && integer_onep (TREE_OPERAND (arg0, 0)))
5514 return fold (build (LSHIFT_EXPR, type, arg1,
5515 TREE_OPERAND (arg0, 1)));
5517 if (TREE_CODE (arg1) == INTEGER_CST
5518 && 0 != (tem = extract_muldiv (TREE_OPERAND (t, 0), arg1,
5520 return convert (type, tem);
5525 /* x*0 is 0, except for IEEE floating point. */
5526 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
5528 && real_zerop (arg1))
5529 return omit_one_operand (type, arg1, arg0);
5530 /* In IEEE floating point, x*1 is not equivalent to x for snans.
5531 However, ANSI says we can drop signals,
5532 so we can do this anyway. */
5533 if (real_onep (arg1))
5534 return non_lvalue (convert (type, arg0));
5536 if (! wins && real_twop (arg1) && global_bindings_p () == 0
5537 && ! contains_placeholder_p (arg0))
5539 tree arg = save_expr (arg0);
5540 return build (PLUS_EXPR, type, arg, arg);
5547 if (integer_all_onesp (arg1))
5548 return omit_one_operand (type, arg1, arg0);
5549 if (integer_zerop (arg1))
5550 return non_lvalue (convert (type, arg0));
5551 t1 = distribute_bit_expr (code, type, arg0, arg1);
5552 if (t1 != NULL_TREE)
5555 /* Convert (or (not arg0) (not arg1)) to (not (and (arg0) (arg1))).
5557 This results in more efficient code for machines without a NAND
5558 instruction. Combine will canonicalize to the first form
5559 which will allow use of NAND instructions provided by the
5560 backend if they exist. */
5561 if (TREE_CODE (arg0) == BIT_NOT_EXPR
5562 && TREE_CODE (arg1) == BIT_NOT_EXPR)
5564 return fold (build1 (BIT_NOT_EXPR, type,
5565 build (BIT_AND_EXPR, type,
5566 TREE_OPERAND (arg0, 0),
5567 TREE_OPERAND (arg1, 0))));
5570 /* See if this can be simplified into a rotate first. If that
5571 is unsuccessful continue in the association code. */
5575 if (integer_zerop (arg1))
5576 return non_lvalue (convert (type, arg0));
5577 if (integer_all_onesp (arg1))
5578 return fold (build1 (BIT_NOT_EXPR, type, arg0));
5580 /* If we are XORing two BIT_AND_EXPR's, both of which are and'ing
5581 with a constant, and the two constants have no bits in common,
5582 we should treat this as a BIT_IOR_EXPR since this may produce more
5584 if (TREE_CODE (arg0) == BIT_AND_EXPR
5585 && TREE_CODE (arg1) == BIT_AND_EXPR
5586 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
5587 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
5588 && integer_zerop (const_binop (BIT_AND_EXPR,
5589 TREE_OPERAND (arg0, 1),
5590 TREE_OPERAND (arg1, 1), 0)))
5592 code = BIT_IOR_EXPR;
5596 /* See if this can be simplified into a rotate first. If that
5597 is unsuccessful continue in the association code. */
5602 if (integer_all_onesp (arg1))
5603 return non_lvalue (convert (type, arg0));
5604 if (integer_zerop (arg1))
5605 return omit_one_operand (type, arg1, arg0);
5606 t1 = distribute_bit_expr (code, type, arg0, arg1);
5607 if (t1 != NULL_TREE)
5609 /* Simplify ((int)c & 0x377) into (int)c, if c is unsigned char. */
5610 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
5611 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
5613 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
5614 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
5615 && (~TREE_INT_CST_LOW (arg0)
5616 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
5617 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
5619 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
5620 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
5622 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
5623 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
5624 && (~TREE_INT_CST_LOW (arg1)
5625 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
5626 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
5629 /* Convert (and (not arg0) (not arg1)) to (not (or (arg0) (arg1))).
5631 This results in more efficient code for machines without a NOR
5632 instruction. Combine will canonicalize to the first form
5633 which will allow use of NOR instructions provided by the
5634 backend if they exist. */
5635 if (TREE_CODE (arg0) == BIT_NOT_EXPR
5636 && TREE_CODE (arg1) == BIT_NOT_EXPR)
5638 return fold (build1 (BIT_NOT_EXPR, type,
5639 build (BIT_IOR_EXPR, type,
5640 TREE_OPERAND (arg0, 0),
5641 TREE_OPERAND (arg1, 0))));
5646 case BIT_ANDTC_EXPR:
5647 if (integer_all_onesp (arg0))
5648 return non_lvalue (convert (type, arg1));
5649 if (integer_zerop (arg0))
5650 return omit_one_operand (type, arg0, arg1);
5651 if (TREE_CODE (arg1) == INTEGER_CST)
5653 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
5654 code = BIT_AND_EXPR;
5660 /* In most cases, do nothing with a divide by zero. */
5661 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
5662 #ifndef REAL_INFINITY
5663 if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
5666 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
5668 /* (-A) / (-B) -> A / B */
5669 if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == NEGATE_EXPR)
5670 return fold (build (RDIV_EXPR, type, TREE_OPERAND (arg0, 0),
5671 TREE_OPERAND (arg1, 0)));
5673 /* In IEEE floating point, x/1 is not equivalent to x for snans.
5674 However, ANSI says we can drop signals, so we can do this anyway. */
5675 if (real_onep (arg1))
5676 return non_lvalue (convert (type, arg0));
5678 /* If ARG1 is a constant, we can convert this to a multiply by the
5679 reciprocal. This does not have the same rounding properties,
5680 so only do this if -ffast-math. We can actually always safely
5681 do it if ARG1 is a power of two, but it's hard to tell if it is
5682 or not in a portable manner. */
5683 if (TREE_CODE (arg1) == REAL_CST)
5686 && 0 != (tem = const_binop (code, build_real (type, dconst1),
5688 return fold (build (MULT_EXPR, type, arg0, tem));
5689 /* Find the reciprocal if optimizing and the result is exact. */
5693 r = TREE_REAL_CST (arg1);
5694 if (exact_real_inverse (TYPE_MODE(TREE_TYPE(arg0)), &r))
5696 tem = build_real (type, r);
5697 return fold (build (MULT_EXPR, type, arg0, tem));
5703 case TRUNC_DIV_EXPR:
5704 case ROUND_DIV_EXPR:
5705 case FLOOR_DIV_EXPR:
5707 case EXACT_DIV_EXPR:
5708 if (integer_onep (arg1))
5709 return non_lvalue (convert (type, arg0));
5710 if (integer_zerop (arg1))
5713 /* If arg0 is a multiple of arg1, then rewrite to the fastest div
5714 operation, EXACT_DIV_EXPR.
5716 Note that only CEIL_DIV_EXPR and FLOOR_DIV_EXPR are rewritten now.
5717 At one time others generated faster code, it's not clear if they do
5718 after the last round to changes to the DIV code in expmed.c. */
5719 if ((code == CEIL_DIV_EXPR || code == FLOOR_DIV_EXPR)
5720 && multiple_of_p (type, arg0, arg1))
5721 return fold (build (EXACT_DIV_EXPR, type, arg0, arg1));
5723 if (TREE_CODE (arg1) == INTEGER_CST
5724 && 0 != (tem = extract_muldiv (TREE_OPERAND (t, 0), arg1,
5726 return convert (type, tem);
5731 case FLOOR_MOD_EXPR:
5732 case ROUND_MOD_EXPR:
5733 case TRUNC_MOD_EXPR:
5734 if (integer_onep (arg1))
5735 return omit_one_operand (type, integer_zero_node, arg0);
5736 if (integer_zerop (arg1))
5739 if (TREE_CODE (arg1) == INTEGER_CST
5740 && 0 != (tem = extract_muldiv (TREE_OPERAND (t, 0), arg1,
5742 return convert (type, tem);
5750 if (integer_zerop (arg1))
5751 return non_lvalue (convert (type, arg0));
5752 /* Since negative shift count is not well-defined,
5753 don't try to compute it in the compiler. */
5754 if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0)
5756 /* Rewrite an LROTATE_EXPR by a constant into an
5757 RROTATE_EXPR by a new constant. */
5758 if (code == LROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST)
5760 TREE_SET_CODE (t, RROTATE_EXPR);
5761 code = RROTATE_EXPR;
5762 TREE_OPERAND (t, 1) = arg1
5765 convert (TREE_TYPE (arg1),
5766 build_int_2 (GET_MODE_BITSIZE (TYPE_MODE (type)), 0)),
5768 if (tree_int_cst_sgn (arg1) < 0)
5772 /* If we have a rotate of a bit operation with the rotate count and
5773 the second operand of the bit operation both constant,
5774 permute the two operations. */
5775 if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
5776 && (TREE_CODE (arg0) == BIT_AND_EXPR
5777 || TREE_CODE (arg0) == BIT_ANDTC_EXPR
5778 || TREE_CODE (arg0) == BIT_IOR_EXPR
5779 || TREE_CODE (arg0) == BIT_XOR_EXPR)
5780 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
5781 return fold (build (TREE_CODE (arg0), type,
5782 fold (build (code, type,
5783 TREE_OPERAND (arg0, 0), arg1)),
5784 fold (build (code, type,
5785 TREE_OPERAND (arg0, 1), arg1))));
5787 /* Two consecutive rotates adding up to the width of the mode can
5789 if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
5790 && TREE_CODE (arg0) == RROTATE_EXPR
5791 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
5792 && TREE_INT_CST_HIGH (arg1) == 0
5793 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
5794 && ((TREE_INT_CST_LOW (arg1)
5795 + TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)))
5796 == GET_MODE_BITSIZE (TYPE_MODE (type))))
5797 return TREE_OPERAND (arg0, 0);
5802 if (operand_equal_p (arg0, arg1, 0))
5804 if (INTEGRAL_TYPE_P (type)
5805 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
5806 return omit_one_operand (type, arg1, arg0);
5810 if (operand_equal_p (arg0, arg1, 0))
5812 if (INTEGRAL_TYPE_P (type)
5813 && TYPE_MAX_VALUE (type)
5814 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
5815 return omit_one_operand (type, arg1, arg0);
5818 case TRUTH_NOT_EXPR:
5819 /* Note that the operand of this must be an int
5820 and its values must be 0 or 1.
5821 ("true" is a fixed value perhaps depending on the language,
5822 but we don't handle values other than 1 correctly yet.) */
5823 tem = invert_truthvalue (arg0);
5824 /* Avoid infinite recursion. */
5825 if (TREE_CODE (tem) == TRUTH_NOT_EXPR)
5827 return convert (type, tem);
5829 case TRUTH_ANDIF_EXPR:
5830 /* Note that the operands of this must be ints
5831 and their values must be 0 or 1.
5832 ("true" is a fixed value perhaps depending on the language.) */
5833 /* If first arg is constant zero, return it. */
5834 if (integer_zerop (arg0))
5836 case TRUTH_AND_EXPR:
5837 /* If either arg is constant true, drop it. */
5838 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
5839 return non_lvalue (arg1);
5840 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
5841 return non_lvalue (arg0);
5842 /* If second arg is constant zero, result is zero, but first arg
5843 must be evaluated. */
5844 if (integer_zerop (arg1))
5845 return omit_one_operand (type, arg1, arg0);
5846 /* Likewise for first arg, but note that only the TRUTH_AND_EXPR
5847 case will be handled here. */
5848 if (integer_zerop (arg0))
5849 return omit_one_operand (type, arg0, arg1);
5852 /* We only do these simplifications if we are optimizing. */
5856 /* Check for things like (A || B) && (A || C). We can convert this
5857 to A || (B && C). Note that either operator can be any of the four
5858 truth and/or operations and the transformation will still be
5859 valid. Also note that we only care about order for the
5860 ANDIF and ORIF operators. If B contains side effects, this
5861 might change the truth-value of A. */
5862 if (TREE_CODE (arg0) == TREE_CODE (arg1)
5863 && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
5864 || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
5865 || TREE_CODE (arg0) == TRUTH_AND_EXPR
5866 || TREE_CODE (arg0) == TRUTH_OR_EXPR)
5867 && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg0, 1)))
5869 tree a00 = TREE_OPERAND (arg0, 0);
5870 tree a01 = TREE_OPERAND (arg0, 1);
5871 tree a10 = TREE_OPERAND (arg1, 0);
5872 tree a11 = TREE_OPERAND (arg1, 1);
5873 int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
5874 || TREE_CODE (arg0) == TRUTH_AND_EXPR)
5875 && (code == TRUTH_AND_EXPR
5876 || code == TRUTH_OR_EXPR));
5878 if (operand_equal_p (a00, a10, 0))
5879 return fold (build (TREE_CODE (arg0), type, a00,
5880 fold (build (code, type, a01, a11))));
5881 else if (commutative && operand_equal_p (a00, a11, 0))
5882 return fold (build (TREE_CODE (arg0), type, a00,
5883 fold (build (code, type, a01, a10))));
5884 else if (commutative && operand_equal_p (a01, a10, 0))
5885 return fold (build (TREE_CODE (arg0), type, a01,
5886 fold (build (code, type, a00, a11))));
5888 /* This case if tricky because we must either have commutative
5889 operators or else A10 must not have side-effects. */
5891 else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
5892 && operand_equal_p (a01, a11, 0))
5893 return fold (build (TREE_CODE (arg0), type,
5894 fold (build (code, type, a00, a10)),
5898 /* See if we can build a range comparison. */
5899 if (0 != (tem = fold_range_test (t)))
5902 /* Check for the possibility of merging component references. If our
5903 lhs is another similar operation, try to merge its rhs with our
5904 rhs. Then try to merge our lhs and rhs. */
5905 if (TREE_CODE (arg0) == code
5906 && 0 != (tem = fold_truthop (code, type,
5907 TREE_OPERAND (arg0, 1), arg1)))
5908 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
5910 if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
5915 case TRUTH_ORIF_EXPR:
5916 /* Note that the operands of this must be ints
5917 and their values must be 0 or true.
5918 ("true" is a fixed value perhaps depending on the language.) */
5919 /* If first arg is constant true, return it. */
5920 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
5923 /* If either arg is constant zero, drop it. */
5924 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
5925 return non_lvalue (arg1);
5926 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
5927 return non_lvalue (arg0);
5928 /* If second arg is constant true, result is true, but we must
5929 evaluate first arg. */
5930 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
5931 return omit_one_operand (type, arg1, arg0);
5932 /* Likewise for first arg, but note this only occurs here for
5934 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
5935 return omit_one_operand (type, arg0, arg1);
5938 case TRUTH_XOR_EXPR:
5939 /* If either arg is constant zero, drop it. */
5940 if (integer_zerop (arg0))
5941 return non_lvalue (arg1);
5942 if (integer_zerop (arg1))
5943 return non_lvalue (arg0);
5944 /* If either arg is constant true, this is a logical inversion. */
5945 if (integer_onep (arg0))
5946 return non_lvalue (invert_truthvalue (arg1));
5947 if (integer_onep (arg1))
5948 return non_lvalue (invert_truthvalue (arg0));
5957 if (FLOAT_TYPE_P (TREE_TYPE (arg0)))
5959 /* (-a) CMP (-b) -> b CMP a */
5960 if (TREE_CODE (arg0) == NEGATE_EXPR
5961 && TREE_CODE (arg1) == NEGATE_EXPR)
5962 return fold (build (code, type, TREE_OPERAND (arg1, 0),
5963 TREE_OPERAND (arg0, 0)));
5964 /* (-a) CMP CST -> a swap(CMP) (-CST) */
5965 if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == REAL_CST)
5968 (swap_tree_comparison (code), type,
5969 TREE_OPERAND (arg0, 0),
5970 build_real (TREE_TYPE (arg1),
5971 REAL_VALUE_NEGATE (TREE_REAL_CST (arg1)))));
5972 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
5973 /* a CMP (-0) -> a CMP 0 */
5974 if (TREE_CODE (arg1) == REAL_CST
5975 && REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (arg1)))
5976 return fold (build (code, type, arg0,
5977 build_real (TREE_TYPE (arg1), dconst0)));
5981 /* If one arg is a constant integer, put it last. */
5982 if (TREE_CODE (arg0) == INTEGER_CST
5983 && TREE_CODE (arg1) != INTEGER_CST)
5985 TREE_OPERAND (t, 0) = arg1;
5986 TREE_OPERAND (t, 1) = arg0;
5987 arg0 = TREE_OPERAND (t, 0);
5988 arg1 = TREE_OPERAND (t, 1);
5989 code = swap_tree_comparison (code);
5990 TREE_SET_CODE (t, code);
5993 /* Convert foo++ == CONST into ++foo == CONST + INCR.
5994 First, see if one arg is constant; find the constant arg
5995 and the other one. */
5997 tree constop = 0, varop = NULL_TREE;
5998 int constopnum = -1;
6000 if (TREE_CONSTANT (arg1))
6001 constopnum = 1, constop = arg1, varop = arg0;
6002 if (TREE_CONSTANT (arg0))
6003 constopnum = 0, constop = arg0, varop = arg1;
6005 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
6007 /* This optimization is invalid for ordered comparisons
6008 if CONST+INCR overflows or if foo+incr might overflow.
6009 This optimization is invalid for floating point due to rounding.
6010 For pointer types we assume overflow doesn't happen. */
6011 if (POINTER_TYPE_P (TREE_TYPE (varop))
6012 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
6013 && (code == EQ_EXPR || code == NE_EXPR)))
6016 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
6017 constop, TREE_OPERAND (varop, 1)));
6018 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
6020 /* If VAROP is a reference to a bitfield, we must mask
6021 the constant by the width of the field. */
6022 if (TREE_CODE (TREE_OPERAND (varop, 0)) == COMPONENT_REF
6023 && DECL_BIT_FIELD(TREE_OPERAND
6024 (TREE_OPERAND (varop, 0), 1)))
6027 = TREE_INT_CST_LOW (DECL_SIZE
6029 (TREE_OPERAND (varop, 0), 1)));
6030 tree mask, unsigned_type;
6032 tree folded_compare;
6034 /* First check whether the comparison would come out
6035 always the same. If we don't do that we would
6036 change the meaning with the masking. */
6037 if (constopnum == 0)
6038 folded_compare = fold (build (code, type, constop,
6039 TREE_OPERAND (varop, 0)));
6041 folded_compare = fold (build (code, type,
6042 TREE_OPERAND (varop, 0),
6044 if (integer_zerop (folded_compare)
6045 || integer_onep (folded_compare))
6046 return omit_one_operand (type, folded_compare, varop);
6048 unsigned_type = type_for_size (size, 1);
6049 precision = TYPE_PRECISION (unsigned_type);
6050 mask = build_int_2 (~0, ~0);
6051 TREE_TYPE (mask) = unsigned_type;
6052 force_fit_type (mask, 0);
6053 mask = const_binop (RSHIFT_EXPR, mask,
6054 size_int (precision - size), 0);
6055 newconst = fold (build (BIT_AND_EXPR,
6056 TREE_TYPE (varop), newconst,
6057 convert (TREE_TYPE (varop),
6062 t = build (code, type, TREE_OPERAND (t, 0),
6063 TREE_OPERAND (t, 1));
6064 TREE_OPERAND (t, constopnum) = newconst;
6068 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
6070 if (POINTER_TYPE_P (TREE_TYPE (varop))
6071 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
6072 && (code == EQ_EXPR || code == NE_EXPR)))
6075 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
6076 constop, TREE_OPERAND (varop, 1)));
6077 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
6079 if (TREE_CODE (TREE_OPERAND (varop, 0)) == COMPONENT_REF
6080 && DECL_BIT_FIELD(TREE_OPERAND
6081 (TREE_OPERAND (varop, 0), 1)))
6084 = TREE_INT_CST_LOW (DECL_SIZE
6086 (TREE_OPERAND (varop, 0), 1)));
6087 tree mask, unsigned_type;
6089 tree folded_compare;
6091 if (constopnum == 0)
6092 folded_compare = fold (build (code, type, constop,
6093 TREE_OPERAND (varop, 0)));
6095 folded_compare = fold (build (code, type,
6096 TREE_OPERAND (varop, 0),
6098 if (integer_zerop (folded_compare)
6099 || integer_onep (folded_compare))
6100 return omit_one_operand (type, folded_compare, varop);
6102 unsigned_type = type_for_size (size, 1);
6103 precision = TYPE_PRECISION (unsigned_type);
6104 mask = build_int_2 (~0, ~0);
6105 TREE_TYPE (mask) = TREE_TYPE (varop);
6106 force_fit_type (mask, 0);
6107 mask = const_binop (RSHIFT_EXPR, mask,
6108 size_int (precision - size), 0);
6109 newconst = fold (build (BIT_AND_EXPR,
6110 TREE_TYPE (varop), newconst,
6111 convert (TREE_TYPE (varop),
6116 t = build (code, type, TREE_OPERAND (t, 0),
6117 TREE_OPERAND (t, 1));
6118 TREE_OPERAND (t, constopnum) = newconst;
6124 /* Change X >= CST to X > (CST - 1) if CST is positive. */
6125 if (TREE_CODE (arg1) == INTEGER_CST
6126 && TREE_CODE (arg0) != INTEGER_CST
6127 && tree_int_cst_sgn (arg1) > 0)
6129 switch (TREE_CODE (t))
6133 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
6134 t = build (code, type, TREE_OPERAND (t, 0), arg1);
6139 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
6140 t = build (code, type, TREE_OPERAND (t, 0), arg1);
6148 /* If this is an EQ or NE comparison of a constant with a PLUS_EXPR or
6149 a MINUS_EXPR of a constant, we can convert it into a comparison with
6150 a revised constant as long as no overflow occurs. */
6151 if ((code == EQ_EXPR || code == NE_EXPR)
6152 && TREE_CODE (arg1) == INTEGER_CST
6153 && (TREE_CODE (arg0) == PLUS_EXPR
6154 || TREE_CODE (arg0) == MINUS_EXPR)
6155 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
6156 && 0 != (tem = const_binop (TREE_CODE (arg0) == PLUS_EXPR
6157 ? MINUS_EXPR : PLUS_EXPR,
6158 arg1, TREE_OPERAND (arg0, 1), 0))
6159 && ! TREE_CONSTANT_OVERFLOW (tem))
6160 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
6162 /* Similarly for a NEGATE_EXPR. */
6163 else if ((code == EQ_EXPR || code == NE_EXPR)
6164 && TREE_CODE (arg0) == NEGATE_EXPR
6165 && TREE_CODE (arg1) == INTEGER_CST
6166 && 0 != (tem = negate_expr (arg1))
6167 && TREE_CODE (tem) == INTEGER_CST
6168 && ! TREE_CONSTANT_OVERFLOW (tem))
6169 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
6171 /* If we have X - Y == 0, we can convert that to X == Y and similarly
6172 for !=. Don't do this for ordered comparisons due to overflow. */
6173 else if ((code == NE_EXPR || code == EQ_EXPR)
6174 && integer_zerop (arg1) && TREE_CODE (arg0) == MINUS_EXPR)
6175 return fold (build (code, type,
6176 TREE_OPERAND (arg0, 0), TREE_OPERAND (arg0, 1)));
6178 /* If we are widening one operand of an integer comparison,
6179 see if the other operand is similarly being widened. Perhaps we
6180 can do the comparison in the narrower type. */
6181 else if (TREE_CODE (TREE_TYPE (arg0)) == INTEGER_TYPE
6182 && TREE_CODE (arg0) == NOP_EXPR
6183 && (tem = get_unwidened (arg0, NULL_TREE)) != arg0
6184 && (t1 = get_unwidened (arg1, TREE_TYPE (tem))) != 0
6185 && (TREE_TYPE (t1) == TREE_TYPE (tem)
6186 || (TREE_CODE (t1) == INTEGER_CST
6187 && int_fits_type_p (t1, TREE_TYPE (tem)))))
6188 return fold (build (code, type, tem, convert (TREE_TYPE (tem), t1)));
6190 /* If this is comparing a constant with a MIN_EXPR or a MAX_EXPR of a
6191 constant, we can simplify it. */
6192 else if (TREE_CODE (arg1) == INTEGER_CST
6193 && (TREE_CODE (arg0) == MIN_EXPR
6194 || TREE_CODE (arg0) == MAX_EXPR)
6195 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
6196 return optimize_minmax_comparison (t);
6198 /* If we are comparing an ABS_EXPR with a constant, we can
6199 convert all the cases into explicit comparisons, but they may
6200 well not be faster than doing the ABS and one comparison.
6201 But ABS (X) <= C is a range comparison, which becomes a subtraction
6202 and a comparison, and is probably faster. */
6203 else if (code == LE_EXPR && TREE_CODE (arg1) == INTEGER_CST
6204 && TREE_CODE (arg0) == ABS_EXPR
6205 && ! TREE_SIDE_EFFECTS (arg0)
6206 && (0 != (tem = negate_expr (arg1)))
6207 && TREE_CODE (tem) == INTEGER_CST
6208 && ! TREE_CONSTANT_OVERFLOW (tem))
6209 return fold (build (TRUTH_ANDIF_EXPR, type,
6210 build (GE_EXPR, type, TREE_OPERAND (arg0, 0), tem),
6211 build (LE_EXPR, type,
6212 TREE_OPERAND (arg0, 0), arg1)));
6214 /* If this is an EQ or NE comparison with zero and ARG0 is
6215 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
6216 two operations, but the latter can be done in one less insn
6217 on machines that have only two-operand insns or on which a
6218 constant cannot be the first operand. */
6219 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
6220 && TREE_CODE (arg0) == BIT_AND_EXPR)
6222 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
6223 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
6225 fold (build (code, type,
6226 build (BIT_AND_EXPR, TREE_TYPE (arg0),
6228 TREE_TYPE (TREE_OPERAND (arg0, 0)),
6229 TREE_OPERAND (arg0, 1),
6230 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
6231 convert (TREE_TYPE (arg0),
6234 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
6235 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
6237 fold (build (code, type,
6238 build (BIT_AND_EXPR, TREE_TYPE (arg0),
6240 TREE_TYPE (TREE_OPERAND (arg0, 1)),
6241 TREE_OPERAND (arg0, 0),
6242 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
6243 convert (TREE_TYPE (arg0),
6248 /* If this is an NE or EQ comparison of zero against the result of a
6249 signed MOD operation whose second operand is a power of 2, make
6250 the MOD operation unsigned since it is simpler and equivalent. */
6251 if ((code == NE_EXPR || code == EQ_EXPR)
6252 && integer_zerop (arg1)
6253 && ! TREE_UNSIGNED (TREE_TYPE (arg0))
6254 && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
6255 || TREE_CODE (arg0) == CEIL_MOD_EXPR
6256 || TREE_CODE (arg0) == FLOOR_MOD_EXPR
6257 || TREE_CODE (arg0) == ROUND_MOD_EXPR)
6258 && integer_pow2p (TREE_OPERAND (arg0, 1)))
6260 tree newtype = unsigned_type (TREE_TYPE (arg0));
6261 tree newmod = build (TREE_CODE (arg0), newtype,
6262 convert (newtype, TREE_OPERAND (arg0, 0)),
6263 convert (newtype, TREE_OPERAND (arg0, 1)));
6265 return build (code, type, newmod, convert (newtype, arg1));
6268 /* If this is an NE comparison of zero with an AND of one, remove the
6269 comparison since the AND will give the correct value. */
6270 if (code == NE_EXPR && integer_zerop (arg1)
6271 && TREE_CODE (arg0) == BIT_AND_EXPR
6272 && integer_onep (TREE_OPERAND (arg0, 1)))
6273 return convert (type, arg0);
6275 /* If we have (A & C) == C where C is a power of 2, convert this into
6276 (A & C) != 0. Similarly for NE_EXPR. */
6277 if ((code == EQ_EXPR || code == NE_EXPR)
6278 && TREE_CODE (arg0) == BIT_AND_EXPR
6279 && integer_pow2p (TREE_OPERAND (arg0, 1))
6280 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
6281 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
6282 arg0, integer_zero_node);
6284 /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
6285 and similarly for >= into !=. */
6286 if ((code == LT_EXPR || code == GE_EXPR)
6287 && TREE_UNSIGNED (TREE_TYPE (arg0))
6288 && TREE_CODE (arg1) == LSHIFT_EXPR
6289 && integer_onep (TREE_OPERAND (arg1, 0)))
6290 return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
6291 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
6292 TREE_OPERAND (arg1, 1)),
6293 convert (TREE_TYPE (arg0), integer_zero_node));
6295 else if ((code == LT_EXPR || code == GE_EXPR)
6296 && TREE_UNSIGNED (TREE_TYPE (arg0))
6297 && (TREE_CODE (arg1) == NOP_EXPR
6298 || TREE_CODE (arg1) == CONVERT_EXPR)
6299 && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
6300 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
6302 build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
6303 convert (TREE_TYPE (arg0),
6304 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
6305 TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
6306 convert (TREE_TYPE (arg0), integer_zero_node));
6308 /* Simplify comparison of something with itself. (For IEEE
6309 floating-point, we can only do some of these simplifications.) */
6310 if (operand_equal_p (arg0, arg1, 0))
6317 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
6318 return constant_boolean_node (1, type);
6320 TREE_SET_CODE (t, code);
6324 /* For NE, we can only do this simplification if integer. */
6325 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
6327 /* ... fall through ... */
6330 return constant_boolean_node (0, type);
6336 /* An unsigned comparison against 0 can be simplified. */
6337 if (integer_zerop (arg1)
6338 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
6339 || POINTER_TYPE_P (TREE_TYPE (arg1)))
6340 && TREE_UNSIGNED (TREE_TYPE (arg1)))
6342 switch (TREE_CODE (t))
6346 TREE_SET_CODE (t, NE_EXPR);
6350 TREE_SET_CODE (t, EQ_EXPR);
6353 return omit_one_operand (type,
6354 convert (type, integer_one_node),
6357 return omit_one_operand (type,
6358 convert (type, integer_zero_node),
6365 /* Comparisons with the highest or lowest possible integer of
6366 the specified size will have known values and an unsigned
6367 <= 0x7fffffff can be simplified. */
6369 int width = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (arg1)));
6371 if (TREE_CODE (arg1) == INTEGER_CST
6372 && ! TREE_CONSTANT_OVERFLOW (arg1)
6373 && width <= HOST_BITS_PER_WIDE_INT
6374 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
6375 || POINTER_TYPE_P (TREE_TYPE (arg1))))
6377 if (TREE_INT_CST_HIGH (arg1) == 0
6378 && (TREE_INT_CST_LOW (arg1)
6379 == ((HOST_WIDE_INT) 1 << (width - 1)) - 1)
6380 && ! TREE_UNSIGNED (TREE_TYPE (arg1)))
6381 switch (TREE_CODE (t))
6384 return omit_one_operand (type,
6385 convert (type, integer_zero_node),
6388 TREE_SET_CODE (t, EQ_EXPR);
6392 return omit_one_operand (type,
6393 convert (type, integer_one_node),
6396 TREE_SET_CODE (t, NE_EXPR);
6403 else if (TREE_INT_CST_HIGH (arg1) == -1
6404 && (- TREE_INT_CST_LOW (arg1)
6405 == ((HOST_WIDE_INT) 1 << (width - 1)))
6406 && ! TREE_UNSIGNED (TREE_TYPE (arg1)))
6407 switch (TREE_CODE (t))
6410 return omit_one_operand (type,
6411 convert (type, integer_zero_node),
6414 TREE_SET_CODE (t, EQ_EXPR);
6418 return omit_one_operand (type,
6419 convert (type, integer_one_node),
6422 TREE_SET_CODE (t, NE_EXPR);
6429 else if (TREE_INT_CST_HIGH (arg1) == 0
6430 && (TREE_INT_CST_LOW (arg1)
6431 == ((HOST_WIDE_INT) 1 << (width - 1)) - 1)
6432 && TREE_UNSIGNED (TREE_TYPE (arg1)))
6434 switch (TREE_CODE (t))
6437 return fold (build (GE_EXPR, type,
6438 convert (signed_type (TREE_TYPE (arg0)),
6440 convert (signed_type (TREE_TYPE (arg1)),
6441 integer_zero_node)));
6443 return fold (build (LT_EXPR, type,
6444 convert (signed_type (TREE_TYPE (arg0)),
6446 convert (signed_type (TREE_TYPE (arg1)),
6447 integer_zero_node)));
6455 /* If we are comparing an expression that just has comparisons
6456 of two integer values, arithmetic expressions of those comparisons,
6457 and constants, we can simplify it. There are only three cases
6458 to check: the two values can either be equal, the first can be
6459 greater, or the second can be greater. Fold the expression for
6460 those three values. Since each value must be 0 or 1, we have
6461 eight possibilities, each of which corresponds to the constant 0
6462 or 1 or one of the six possible comparisons.
6464 This handles common cases like (a > b) == 0 but also handles
6465 expressions like ((x > y) - (y > x)) > 0, which supposedly
6466 occur in macroized code. */
6468 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
6470 tree cval1 = 0, cval2 = 0;
6473 if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
6474 /* Don't handle degenerate cases here; they should already
6475 have been handled anyway. */
6476 && cval1 != 0 && cval2 != 0
6477 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
6478 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
6479 && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
6480 && TYPE_MAX_VALUE (TREE_TYPE (cval1))
6481 && TYPE_MAX_VALUE (TREE_TYPE (cval2))
6482 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
6483 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
6485 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
6486 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
6488 /* We can't just pass T to eval_subst in case cval1 or cval2
6489 was the same as ARG1. */
6492 = fold (build (code, type,
6493 eval_subst (arg0, cval1, maxval, cval2, minval),
6496 = fold (build (code, type,
6497 eval_subst (arg0, cval1, maxval, cval2, maxval),
6500 = fold (build (code, type,
6501 eval_subst (arg0, cval1, minval, cval2, maxval),
6504 /* All three of these results should be 0 or 1. Confirm they
6505 are. Then use those values to select the proper code
6508 if ((integer_zerop (high_result)
6509 || integer_onep (high_result))
6510 && (integer_zerop (equal_result)
6511 || integer_onep (equal_result))
6512 && (integer_zerop (low_result)
6513 || integer_onep (low_result)))
6515 /* Make a 3-bit mask with the high-order bit being the
6516 value for `>', the next for '=', and the low for '<'. */
6517 switch ((integer_onep (high_result) * 4)
6518 + (integer_onep (equal_result) * 2)
6519 + integer_onep (low_result))
6523 return omit_one_operand (type, integer_zero_node, arg0);
6544 return omit_one_operand (type, integer_one_node, arg0);
6547 t = build (code, type, cval1, cval2);
6549 return save_expr (t);
6556 /* If this is a comparison of a field, we may be able to simplify it. */
6557 if ((TREE_CODE (arg0) == COMPONENT_REF
6558 || TREE_CODE (arg0) == BIT_FIELD_REF)
6559 && (code == EQ_EXPR || code == NE_EXPR)
6560 /* Handle the constant case even without -O
6561 to make sure the warnings are given. */
6562 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
6564 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
6568 /* If this is a comparison of complex values and either or both sides
6569 are a COMPLEX_EXPR or COMPLEX_CST, it is best to split up the
6570 comparisons and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR.
6571 This may prevent needless evaluations. */
6572 if ((code == EQ_EXPR || code == NE_EXPR)
6573 && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
6574 && (TREE_CODE (arg0) == COMPLEX_EXPR
6575 || TREE_CODE (arg1) == COMPLEX_EXPR
6576 || TREE_CODE (arg0) == COMPLEX_CST
6577 || TREE_CODE (arg1) == COMPLEX_CST))
6579 tree subtype = TREE_TYPE (TREE_TYPE (arg0));
6580 tree real0, imag0, real1, imag1;
6582 arg0 = save_expr (arg0);
6583 arg1 = save_expr (arg1);
6584 real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
6585 imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
6586 real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
6587 imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));
6589 return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
6592 fold (build (code, type, real0, real1)),
6593 fold (build (code, type, imag0, imag1))));
6596 /* From here on, the only cases we handle are when the result is
6597 known to be a constant.
6599 To compute GT, swap the arguments and do LT.
6600 To compute GE, do LT and invert the result.
6601 To compute LE, swap the arguments, do LT and invert the result.
6602 To compute NE, do EQ and invert the result.
6604 Therefore, the code below must handle only EQ and LT. */
6606 if (code == LE_EXPR || code == GT_EXPR)
6608 tem = arg0, arg0 = arg1, arg1 = tem;
6609 code = swap_tree_comparison (code);
6612 /* Note that it is safe to invert for real values here because we
6613 will check below in the one case that it matters. */
6617 if (code == NE_EXPR || code == GE_EXPR)
6620 code = invert_tree_comparison (code);
6623 /* Compute a result for LT or EQ if args permit;
6624 otherwise return T. */
6625 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
6627 if (code == EQ_EXPR)
6628 t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
6629 == TREE_INT_CST_LOW (arg1))
6630 && (TREE_INT_CST_HIGH (arg0)
6631 == TREE_INT_CST_HIGH (arg1)),
6634 t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
6635 ? INT_CST_LT_UNSIGNED (arg0, arg1)
6636 : INT_CST_LT (arg0, arg1)),
6640 #if 0 /* This is no longer useful, but breaks some real code. */
6641 /* Assume a nonexplicit constant cannot equal an explicit one,
6642 since such code would be undefined anyway.
6643 Exception: on sysvr4, using #pragma weak,
6644 a label can come out as 0. */
6645 else if (TREE_CODE (arg1) == INTEGER_CST
6646 && !integer_zerop (arg1)
6647 && TREE_CONSTANT (arg0)
6648 && TREE_CODE (arg0) == ADDR_EXPR
6650 t1 = build_int_2 (0, 0);
6652 /* Two real constants can be compared explicitly. */
6653 else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
6655 /* If either operand is a NaN, the result is false with two
6656 exceptions: First, an NE_EXPR is true on NaNs, but that case
6657 is already handled correctly since we will be inverting the
6658 result for NE_EXPR. Second, if we had inverted a LE_EXPR
6659 or a GE_EXPR into a LT_EXPR, we must return true so that it
6660 will be inverted into false. */
6662 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
6663 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
6664 t1 = build_int_2 (invert && code == LT_EXPR, 0);
6666 else if (code == EQ_EXPR)
6667 t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
6668 TREE_REAL_CST (arg1)),
6671 t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
6672 TREE_REAL_CST (arg1)),
6676 if (t1 == NULL_TREE)
6680 TREE_INT_CST_LOW (t1) ^= 1;
6682 TREE_TYPE (t1) = type;
6683 if (TREE_CODE (type) == BOOLEAN_TYPE)
6684 return truthvalue_conversion (t1);
6688 /* Pedantic ANSI C says that a conditional expression is never an lvalue,
6689 so all simple results must be passed through pedantic_non_lvalue. */
6690 if (TREE_CODE (arg0) == INTEGER_CST)
6691 return pedantic_non_lvalue
6692 (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
6693 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
6694 return pedantic_omit_one_operand (type, arg1, arg0);
6696 /* If the second operand is zero, invert the comparison and swap
6697 the second and third operands. Likewise if the second operand
6698 is constant and the third is not or if the third operand is
6699 equivalent to the first operand of the comparison. */
6701 if (integer_zerop (arg1)
6702 || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
6703 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
6704 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
6705 TREE_OPERAND (t, 2),
6706 TREE_OPERAND (arg0, 1))))
6708 /* See if this can be inverted. If it can't, possibly because
6709 it was a floating-point inequality comparison, don't do
6711 tem = invert_truthvalue (arg0);
6713 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
6715 t = build (code, type, tem,
6716 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
6718 /* arg1 should be the first argument of the new T. */
6719 arg1 = TREE_OPERAND (t, 1);
6724 /* If we have A op B ? A : C, we may be able to convert this to a
6725 simpler expression, depending on the operation and the values
6726 of B and C. IEEE floating point prevents this though,
6727 because A or B might be -0.0 or a NaN. */
6729 if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
6730 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
6731 || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
6733 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
6734 arg1, TREE_OPERAND (arg0, 1)))
6736 tree arg2 = TREE_OPERAND (t, 2);
6737 enum tree_code comp_code = TREE_CODE (arg0);
6741 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
6742 depending on the comparison operation. */
6743 if ((FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 1)))
6744 ? real_zerop (TREE_OPERAND (arg0, 1))
6745 : integer_zerop (TREE_OPERAND (arg0, 1)))
6746 && TREE_CODE (arg2) == NEGATE_EXPR
6747 && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
6751 return pedantic_non_lvalue (negate_expr (arg1));
6753 return pedantic_non_lvalue (convert (type, arg1));
6756 if (TREE_UNSIGNED (TREE_TYPE (arg1)))
6757 arg1 = convert (signed_type (TREE_TYPE (arg1)), arg1);
6758 return pedantic_non_lvalue
6759 (convert (type, fold (build1 (ABS_EXPR,
6760 TREE_TYPE (arg1), arg1))));
6763 if (TREE_UNSIGNED (TREE_TYPE (arg1)))
6764 arg1 = convert (signed_type (TREE_TYPE (arg1)), arg1);
6765 return pedantic_non_lvalue
6766 (negate_expr (convert (type,
6767 fold (build1 (ABS_EXPR,
6774 /* If this is A != 0 ? A : 0, this is simply A. For ==, it is
6777 if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
6779 if (comp_code == NE_EXPR)
6780 return pedantic_non_lvalue (convert (type, arg1));
6781 else if (comp_code == EQ_EXPR)
6782 return pedantic_non_lvalue (convert (type, integer_zero_node));
6785 /* If this is A op B ? A : B, this is either A, B, min (A, B),
6786 or max (A, B), depending on the operation. */
6788 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
6789 arg2, TREE_OPERAND (arg0, 0)))
6791 tree comp_op0 = TREE_OPERAND (arg0, 0);
6792 tree comp_op1 = TREE_OPERAND (arg0, 1);
6793 tree comp_type = TREE_TYPE (comp_op0);
6798 return pedantic_non_lvalue (convert (type, arg2));
6800 return pedantic_non_lvalue (convert (type, arg1));
6803 /* In C++ a ?: expression can be an lvalue, so put the
6804 operand which will be used if they are equal first
6805 so that we can convert this back to the
6806 corresponding COND_EXPR. */
6807 return pedantic_non_lvalue
6808 (convert (type, (fold (build (MIN_EXPR, comp_type,
6809 (comp_code == LE_EXPR
6810 ? comp_op0 : comp_op1),
6811 (comp_code == LE_EXPR
6812 ? comp_op1 : comp_op0))))));
6816 return pedantic_non_lvalue
6817 (convert (type, fold (build (MAX_EXPR, comp_type,
6818 (comp_code == GE_EXPR
6819 ? comp_op0 : comp_op1),
6820 (comp_code == GE_EXPR
6821 ? comp_op1 : comp_op0)))));
6828 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
6829 we might still be able to simplify this. For example,
6830 if C1 is one less or one more than C2, this might have started
6831 out as a MIN or MAX and been transformed by this function.
6832 Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE. */
6834 if (INTEGRAL_TYPE_P (type)
6835 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
6836 && TREE_CODE (arg2) == INTEGER_CST)
6840 /* We can replace A with C1 in this case. */
6841 arg1 = convert (type, TREE_OPERAND (arg0, 1));
6842 t = build (code, type, TREE_OPERAND (t, 0), arg1,
6843 TREE_OPERAND (t, 2));
6847 /* If C1 is C2 + 1, this is min(A, C2). */
6848 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
6849 && operand_equal_p (TREE_OPERAND (arg0, 1),
6850 const_binop (PLUS_EXPR, arg2,
6851 integer_one_node, 0), 1))
6852 return pedantic_non_lvalue
6853 (fold (build (MIN_EXPR, type, arg1, arg2)));
6857 /* If C1 is C2 - 1, this is min(A, C2). */
6858 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
6859 && operand_equal_p (TREE_OPERAND (arg0, 1),
6860 const_binop (MINUS_EXPR, arg2,
6861 integer_one_node, 0), 1))
6862 return pedantic_non_lvalue
6863 (fold (build (MIN_EXPR, type, arg1, arg2)));
6867 /* If C1 is C2 - 1, this is max(A, C2). */
6868 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
6869 && operand_equal_p (TREE_OPERAND (arg0, 1),
6870 const_binop (MINUS_EXPR, arg2,
6871 integer_one_node, 0), 1))
6872 return pedantic_non_lvalue
6873 (fold (build (MAX_EXPR, type, arg1, arg2)));
6877 /* If C1 is C2 + 1, this is max(A, C2). */
6878 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
6879 && operand_equal_p (TREE_OPERAND (arg0, 1),
6880 const_binop (PLUS_EXPR, arg2,
6881 integer_one_node, 0), 1))
6882 return pedantic_non_lvalue
6883 (fold (build (MAX_EXPR, type, arg1, arg2)));
6892 /* If the second operand is simpler than the third, swap them
6893 since that produces better jump optimization results. */
6894 if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
6895 || TREE_CODE (arg1) == SAVE_EXPR)
6896 && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
6897 || TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
6898 || TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
6900 /* See if this can be inverted. If it can't, possibly because
6901 it was a floating-point inequality comparison, don't do
6903 tem = invert_truthvalue (arg0);
6905 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
6907 t = build (code, type, tem,
6908 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
6910 /* arg1 should be the first argument of the new T. */
6911 arg1 = TREE_OPERAND (t, 1);
6916 /* Convert A ? 1 : 0 to simply A. */
6917 if (integer_onep (TREE_OPERAND (t, 1))
6918 && integer_zerop (TREE_OPERAND (t, 2))
6919 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
6920 call to fold will try to move the conversion inside
6921 a COND, which will recurse. In that case, the COND_EXPR
6922 is probably the best choice, so leave it alone. */
6923 && type == TREE_TYPE (arg0))
6924 return pedantic_non_lvalue (arg0);
6926 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
6927 operation is simply A & 2. */
6929 if (integer_zerop (TREE_OPERAND (t, 2))
6930 && TREE_CODE (arg0) == NE_EXPR
6931 && integer_zerop (TREE_OPERAND (arg0, 1))
6932 && integer_pow2p (arg1)
6933 && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
6934 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
6936 return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
6941 /* When pedantic, a compound expression can be neither an lvalue
6942 nor an integer constant expression. */
6943 if (TREE_SIDE_EFFECTS (arg0) || pedantic)
6945 /* Don't let (0, 0) be null pointer constant. */
6946 if (integer_zerop (arg1))
6947 return build1 (NOP_EXPR, TREE_TYPE (arg1), arg1);
6952 return build_complex (type, arg0, arg1);
6956 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
6958 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
6959 return omit_one_operand (type, TREE_OPERAND (arg0, 0),
6960 TREE_OPERAND (arg0, 1));
6961 else if (TREE_CODE (arg0) == COMPLEX_CST)
6962 return TREE_REALPART (arg0);
6963 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
6964 return fold (build (TREE_CODE (arg0), type,
6965 fold (build1 (REALPART_EXPR, type,
6966 TREE_OPERAND (arg0, 0))),
6967 fold (build1 (REALPART_EXPR,
6968 type, TREE_OPERAND (arg0, 1)))));
6972 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
6973 return convert (type, integer_zero_node);
6974 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
6975 return omit_one_operand (type, TREE_OPERAND (arg0, 1),
6976 TREE_OPERAND (arg0, 0));
6977 else if (TREE_CODE (arg0) == COMPLEX_CST)
6978 return TREE_IMAGPART (arg0);
6979 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
6980 return fold (build (TREE_CODE (arg0), type,
6981 fold (build1 (IMAGPART_EXPR, type,
6982 TREE_OPERAND (arg0, 0))),
6983 fold (build1 (IMAGPART_EXPR, type,
6984 TREE_OPERAND (arg0, 1)))));
6987 /* Pull arithmetic ops out of the CLEANUP_POINT_EXPR where
6989 case CLEANUP_POINT_EXPR:
6990 if (! has_cleanups (arg0))
6991 return TREE_OPERAND (t, 0);
6994 enum tree_code code0 = TREE_CODE (arg0);
6995 int kind0 = TREE_CODE_CLASS (code0);
6996 tree arg00 = TREE_OPERAND (arg0, 0);
6999 if (kind0 == '1' || code0 == TRUTH_NOT_EXPR)
7000 return fold (build1 (code0, type,
7001 fold (build1 (CLEANUP_POINT_EXPR,
7002 TREE_TYPE (arg00), arg00))));
7004 if (kind0 == '<' || kind0 == '2'
7005 || code0 == TRUTH_ANDIF_EXPR || code0 == TRUTH_ORIF_EXPR
7006 || code0 == TRUTH_AND_EXPR || code0 == TRUTH_OR_EXPR
7007 || code0 == TRUTH_XOR_EXPR)
7009 arg01 = TREE_OPERAND (arg0, 1);
7011 if (TREE_CONSTANT (arg00)
7012 || ((code0 == TRUTH_ANDIF_EXPR || code0 == TRUTH_ORIF_EXPR)
7013 && ! has_cleanups (arg00)))
7014 return fold (build (code0, type, arg00,
7015 fold (build1 (CLEANUP_POINT_EXPR,
7016 TREE_TYPE (arg01), arg01))));
7018 if (TREE_CONSTANT (arg01))
7019 return fold (build (code0, type,
7020 fold (build1 (CLEANUP_POINT_EXPR,
7021 TREE_TYPE (arg00), arg00)),
7030 } /* switch (code) */
7033 /* Determine if first argument is a multiple of second argument. Return 0 if
7034 it is not, or we cannot easily determine it to be.
7036 An example of the sort of thing we care about (at this point; this routine
7037 could surely be made more general, and expanded to do what the *_DIV_EXPR's
7038 fold cases do now) is discovering that
7040 SAVE_EXPR (I) * SAVE_EXPR (J * 8)
7042 is a multiple of
7044 SAVE_EXPR (J * 8)
7046 when we know that the two SAVE_EXPR (J * 8) nodes are the same node.
7048 This code also handles discovering that
7050 SAVE_EXPR (I) * SAVE_EXPR (J * 8)
7052 is a multiple of 8 so we don't have to worry about dealing with a
7053 possible remainder.
7055 Note that we *look* inside a SAVE_EXPR only to determine how it was
7056 calculated; it is not safe for fold to do much of anything else with the
7057 internals of a SAVE_EXPR, since it cannot know when it will be evaluated
7058 at run time. For example, the latter example above *cannot* be implemented
7059 as SAVE_EXPR (I) * J or any variant thereof, since the value of J at
7060 evaluation time of the original SAVE_EXPR is not necessarily the same at
7061 the time the new expression is evaluated. The only optimization of this
7062 sort that would be valid is changing
7064 SAVE_EXPR (I) * SAVE_EXPR (SAVE_EXPR (J) * 8)
7066 to
7068 SAVE_EXPR (I) * SAVE_EXPR (J) * 8
7070 (where the same SAVE_EXPR (J) is used in the original and the
7071 transformed version). */
7074 multiple_of_p (type, top, bottom)
7079 if (operand_equal_p (top, bottom, 0))
7082 if (TREE_CODE (type) != INTEGER_TYPE)
7085 switch (TREE_CODE (top))
7088 return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom)
7089 || multiple_of_p (type, TREE_OPERAND (top, 1), bottom));
7093 return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom)
7094 && multiple_of_p (type, TREE_OPERAND (top, 1), bottom));
7097 /* Can't handle conversions from non-integral or wider integral type. */
7098 if ((TREE_CODE (TREE_TYPE (TREE_OPERAND (top, 0))) != INTEGER_TYPE)
7099 || (TYPE_PRECISION (type)
7100 < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (top, 0)))))
7103 /* .. fall through ... */
7106 return multiple_of_p (type, TREE_OPERAND (top, 0), bottom);
7109 if ((TREE_CODE (bottom) != INTEGER_CST)
7110 || (tree_int_cst_sgn (top) < 0)
7111 || (tree_int_cst_sgn (bottom) < 0))
7113 return integer_zerop (const_binop (TRUNC_MOD_EXPR,