1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
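/* Worked example (added for illustration): the value 5 is held as
   (low = 5, high = HWI_SIGN_EXTEND (5) = 0), while -5 is held as
   (low = (unsigned HOST_WIDE_INT) -5, high = HWI_SIGN_EXTEND (-5) = -1);
   HIGH is simply LOW's sign bit replicated across a whole word.  */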
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 static bool associative_constant_p (rtx);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 /* Negate a CONST_INT rtx, truncating (because a conversion from a
61 maximally negative number can overflow). */
63 neg_const_int (enum machine_mode mode, rtx i)
65 return gen_int_mode (- INTVAL (i), mode);
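/* Illustration of the truncation above (added): with a 64-bit
   HOST_WIDE_INT, negating (const_int -2147483648) in SImode produces
   +2147483648, which does not fit in 32 bits; gen_int_mode truncates
   it back to -2147483648, matching two's complement hardware.  */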
69 /* Make a binary operation by properly ordering the operands and
70 seeing if the expression folds. */
73 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
78 /* Put complex operands first and constants second if commutative. */
79 if (GET_RTX_CLASS (code) == 'c'
80 && swap_commutative_operands_p (op0, op1))
81 tem = op0, op0 = op1, op1 = tem;
83 /* If this simplifies, do it. */
84 tem = simplify_binary_operation (code, mode, op0, op1);
88 /* Handle addition and subtraction specially. Otherwise, just form the operation. */
91 if (code == PLUS || code == MINUS)
93 tem = simplify_plus_minus (code, mode, op0, op1, 1);
98 return gen_rtx_fmt_ee (code, mode, op0, op1);
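#if 0
/* Usage sketch (added for illustration; not part of the original
   source).  Constant operands fold outright, while a commutative
   operation with a register gets its operands put in canonical order
   before the rtx is built.  */
static rtx
simplify_gen_binary_example (rtx reg)
{
  /* Folds immediately to (const_int 5).  */
  rtx five = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
  /* Canonicalized to (plus:SI reg (const_int 2)): the constant is
     placed second.  */
  rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (2), reg);
  return simplify_gen_binary (PLUS, SImode, sum, five);
}
#endif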
101 /* If X is a MEM referencing the constant pool, return the real value.
102 Otherwise return X. */
104 avoid_constant_pool_reference (rtx x)
107 enum machine_mode cmode;
109 switch (GET_CODE (x))
115 /* Handle float extensions of constant pool references. */
117 c = avoid_constant_pool_reference (tmp);
118 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
122 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
123 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
133 /* Call target hook to avoid the effects of -fpic etc.... */
134 addr = (*targetm.delegitimize_address) (addr);
136 if (GET_CODE (addr) == LO_SUM)
137 addr = XEXP (addr, 1);
139 if (GET_CODE (addr) != SYMBOL_REF
140 || ! CONSTANT_POOL_ADDRESS_P (addr))
143 c = get_pool_constant (addr);
144 cmode = get_pool_mode (addr);
146 /* If we're accessing the constant in a different mode than it was
147 originally stored, attempt to fix that up via subreg simplifications.
148 If that fails we have no choice but to return the original memory. */
149 if (cmode != GET_MODE (x))
151 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
158 /* Make a unary operation by first seeing if it folds and otherwise making
159 the specified operation. */
162 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
163 enum machine_mode op_mode)
167 /* If this simplifies, use it. */
168 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
171 return gen_rtx_fmt_e (code, mode, op);
174 /* Likewise for ternary operations. */
177 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
178 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
182 /* If this simplifies, use it. */
183 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
187 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
190 /* Likewise, for relational operations.
191 CMP_MODE specifies the mode in which the comparison is done. */
195 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
196 enum machine_mode cmp_mode, rtx op0, rtx op1)
200 if (cmp_mode == VOIDmode)
201 cmp_mode = GET_MODE (op0);
202 if (cmp_mode == VOIDmode)
203 cmp_mode = GET_MODE (op1);
205 if (cmp_mode != VOIDmode)
207 tem = simplify_relational_operation (code, cmp_mode, op0, op1);
211 #ifdef FLOAT_STORE_FLAG_VALUE
212 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
215 if (tem == const0_rtx)
216 return CONST0_RTX (mode);
217 if (tem != const_true_rtx)
219 val = FLOAT_STORE_FLAG_VALUE (mode);
220 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
227 /* For the following tests, ensure const0_rtx is op1. */
228 if (swap_commutative_operands_p (op0, op1)
229 || (op0 == const0_rtx && op1 != const0_rtx))
230 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
232 /* If op0 is a compare, extract the comparison arguments from it. */
233 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
234 return simplify_gen_relational (code, mode, VOIDmode,
235 XEXP (op0, 0), XEXP (op0, 1));
237 /* If op0 is a comparison, extract the comparison arguments from it. */
238 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
242 if (GET_MODE (op0) == mode)
244 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
245 XEXP (op0, 0), XEXP (op0, 1));
249 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
251 return simplify_gen_relational (new, mode, VOIDmode,
252 XEXP (op0, 0), XEXP (op0, 1));
256 return gen_rtx_fmt_ee (code, mode, op0, op1);
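/* Illustration (added): simplify_gen_relational (EQ, SImode, VOIDmode,
   gen_rtx_COMPARE (CCmode, x, y), const0_rtx) strips the COMPARE via
   the recursive call above and reduces to (eq:SI x y) when no further
   folding applies.  */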
259 /* Replace all occurrences of OLD in X with NEW and try to simplify the
260 resulting RTX. Return a new RTX which is as simplified as possible. */
263 simplify_replace_rtx (rtx x, rtx old, rtx new)
265 enum rtx_code code = GET_CODE (x);
266 enum machine_mode mode = GET_MODE (x);
268 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
269 to build a new expression substituting recursively. If we can't do
270 anything, return our input. */
275 switch (GET_RTX_CLASS (code))
279 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
280 rtx op = (XEXP (x, 0) == old
281 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
283 return simplify_gen_unary (code, mode, op, op_mode);
289 simplify_gen_binary (code, mode,
290 simplify_replace_rtx (XEXP (x, 0), old, new),
291 simplify_replace_rtx (XEXP (x, 1), old, new));
294 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
295 ? GET_MODE (XEXP (x, 0))
296 : GET_MODE (XEXP (x, 1)));
297 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
298 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
299 return simplify_gen_relational (code, mode, op_mode, op0, op1);
305 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
306 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
309 simplify_gen_ternary (code, mode,
314 simplify_replace_rtx (XEXP (x, 1), old, new),
315 simplify_replace_rtx (XEXP (x, 2), old, new));
319 /* The only case we try to handle is a SUBREG. */
323 exp = simplify_gen_subreg (GET_MODE (x),
324 simplify_replace_rtx (SUBREG_REG (x),
326 GET_MODE (SUBREG_REG (x)),
335 return replace_equiv_address_nv (x,
336 simplify_replace_rtx (XEXP (x, 0),
338 else if (code == LO_SUM)
340 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
341 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
343 /* (lo_sum (high x) x) -> x */
344 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
347 return gen_rtx_LO_SUM (mode, op0, op1);
349 else if (code == REG)
351 if (REG_P (old) && REGNO (x) == REGNO (old))
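/* Illustration (added): substitution re-simplifies as it rebuilds, so
   replacing R with (const_int 3) in (plus:SI (mult:SI R (const_int 2))
   (const_int 1)) folds all the way down to (const_int 7) rather than
   producing a literal copy with R replaced.  */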
363 /* Try to simplify a unary operation CODE whose output mode is to be
364 MODE with input operand OP whose mode was originally OP_MODE.
365 Return zero if no simplification can be made. */
367 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
368 rtx op, enum machine_mode op_mode)
370 unsigned int width = GET_MODE_BITSIZE (mode);
371 rtx trueop = avoid_constant_pool_reference (op);
373 if (code == VEC_DUPLICATE)
375 if (!VECTOR_MODE_P (mode))
377 if (GET_MODE (trueop) != VOIDmode
378 && !VECTOR_MODE_P (GET_MODE (trueop))
379 && GET_MODE_INNER (mode) != GET_MODE (trueop))
381 if (GET_MODE (trueop) != VOIDmode
382 && VECTOR_MODE_P (GET_MODE (trueop))
383 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
385 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
386 || GET_CODE (trueop) == CONST_VECTOR)
388 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
389 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
390 rtvec v = rtvec_alloc (n_elts);
393 if (GET_CODE (trueop) != CONST_VECTOR)
394 for (i = 0; i < n_elts; i++)
395 RTVEC_ELT (v, i) = trueop;
398 enum machine_mode inmode = GET_MODE (trueop);
399 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
400 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
402 if (in_n_elts >= n_elts || n_elts % in_n_elts)
404 for (i = 0; i < n_elts; i++)
405 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
407 return gen_rtx_CONST_VECTOR (mode, v);
411 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
413 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
414 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
415 enum machine_mode opmode = GET_MODE (trueop);
416 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
417 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
418 rtvec v = rtvec_alloc (n_elts);
421 if (op_n_elts != n_elts)
424 for (i = 0; i < n_elts; i++)
426 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
427 CONST_VECTOR_ELT (trueop, i),
428 GET_MODE_INNER (opmode));
431 RTVEC_ELT (v, i) = x;
433 return gen_rtx_CONST_VECTOR (mode, v);
436 /* The order of these tests is critical so that, for example, we don't
437 check the wrong mode (input vs. output) for a conversion operation,
438 such as FIX. At some point, this should be simplified. */
440 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
441 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
443 HOST_WIDE_INT hv, lv;
446 if (GET_CODE (trueop) == CONST_INT)
447 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
449 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
451 REAL_VALUE_FROM_INT (d, lv, hv, mode);
452 d = real_value_truncate (mode, d);
453 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
455 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
456 && (GET_CODE (trueop) == CONST_DOUBLE
457 || GET_CODE (trueop) == CONST_INT))
459 HOST_WIDE_INT hv, lv;
462 if (GET_CODE (trueop) == CONST_INT)
463 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
465 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
467 if (op_mode == VOIDmode)
469 /* We don't know how to interpret negative-looking numbers in
470 this case, so don't try to fold those. */
474 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
477 hv = 0, lv &= GET_MODE_MASK (op_mode);
479 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
480 d = real_value_truncate (mode, d);
481 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
484 if (GET_CODE (trueop) == CONST_INT
485 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
487 HOST_WIDE_INT arg0 = INTVAL (trueop);
501 val = (arg0 >= 0 ? arg0 : - arg0);
505 /* Don't use ffs here. Instead, get low order bit and then its
506 number. If arg0 is zero, this will return 0, as desired. */
507 arg0 &= GET_MODE_MASK (mode);
508 val = exact_log2 (arg0 & (- arg0)) + 1;
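/* Worked example (added for illustration): for arg0 = 0b10100,
   arg0 & -arg0 isolates the lowest set bit, 0b00100; exact_log2 of
   that is 2, so FFS yields 3 (FFS numbers bits from 1).  */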
512 arg0 &= GET_MODE_MASK (mode);
513 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
516 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
520 arg0 &= GET_MODE_MASK (mode);
523 /* Even if the value at zero is undefined, we have to come
524 up with some replacement. Seems good enough. */
525 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
526 val = GET_MODE_BITSIZE (mode);
529 val = exact_log2 (arg0 & -arg0);
533 arg0 &= GET_MODE_MASK (mode);
536 val++, arg0 &= arg0 - 1;
540 arg0 &= GET_MODE_MASK (mode);
543 val++, arg0 &= arg0 - 1;
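/* The two loops above use the arg0 &= arg0 - 1 trick, which clears
   the lowest set bit on each iteration: 0b1011 -> 0b1010 -> 0b1000
   -> 0, three steps, giving POPCOUNT 3 and hence odd PARITY.  */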
552 /* When zero-extending a CONST_INT, we need to know its original mode. */
554 if (op_mode == VOIDmode)
556 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
558 /* If we were really extending the mode,
559 we would have to distinguish between zero-extension
560 and sign-extension. */
561 if (width != GET_MODE_BITSIZE (op_mode))
565 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
566 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
572 if (op_mode == VOIDmode)
574 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
576 /* If we were really extending the mode,
577 we would have to distinguish between zero-extension
578 and sign-extension. */
579 if (width != GET_MODE_BITSIZE (op_mode))
583 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
586 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
588 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
589 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
606 val = trunc_int_for_mode (val, mode);
608 return GEN_INT (val);
611 /* We can do some operations on integer CONST_DOUBLEs. Also allow
612 for a DImode operation on a CONST_INT. */
613 else if (GET_MODE (trueop) == VOIDmode
614 && width <= HOST_BITS_PER_WIDE_INT * 2
615 && (GET_CODE (trueop) == CONST_DOUBLE
616 || GET_CODE (trueop) == CONST_INT))
618 unsigned HOST_WIDE_INT l1, lv;
619 HOST_WIDE_INT h1, hv;
621 if (GET_CODE (trueop) == CONST_DOUBLE)
622 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
624 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
634 neg_double (l1, h1, &lv, &hv);
639 neg_double (l1, h1, &lv, &hv);
651 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
654 lv = exact_log2 (l1 & -l1) + 1;
660 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
661 - HOST_BITS_PER_WIDE_INT;
663 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
664 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
665 lv = GET_MODE_BITSIZE (mode);
671 lv = exact_log2 (l1 & -l1);
673 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
674 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
675 lv = GET_MODE_BITSIZE (mode);
698 /* This is just a change-of-mode, so do nothing. */
703 if (op_mode == VOIDmode)
706 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
710 lv = l1 & GET_MODE_MASK (op_mode);
714 if (op_mode == VOIDmode
715 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
719 lv = l1 & GET_MODE_MASK (op_mode);
720 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
721 && (lv & ((HOST_WIDE_INT) 1
722 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
723 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
725 hv = HWI_SIGN_EXTEND (lv);
736 return immed_double_const (lv, hv, mode);
739 else if (GET_CODE (trueop) == CONST_DOUBLE
740 && GET_MODE_CLASS (mode) == MODE_FLOAT)
742 REAL_VALUE_TYPE d, t;
743 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
748 if (HONOR_SNANS (mode) && real_isnan (&d))
750 real_sqrt (&t, mode, &d);
754 d = REAL_VALUE_ABS (d);
757 d = REAL_VALUE_NEGATE (d);
760 d = real_value_truncate (mode, d);
763 /* All this does is change the mode. */
766 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
772 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
775 else if (GET_CODE (trueop) == CONST_DOUBLE
776 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
777 && GET_MODE_CLASS (mode) == MODE_INT
778 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
782 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
785 case FIX: i = REAL_VALUE_FIX (d); break;
786 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
790 return gen_int_mode (i, mode);
793 /* This was formerly used only for non-IEEE float.
794 eggert@twinsun.com says it is safe for IEEE also. */
797 enum rtx_code reversed;
800 /* There are some simplifications we can do even if the operands aren't constant. */
805 /* (not (not X)) == X. */
806 if (GET_CODE (op) == NOT)
809 /* (not (eq X Y)) == (ne X Y), etc. */
810 if (GET_RTX_CLASS (GET_CODE (op)) == '<'
811 && (mode == BImode || STORE_FLAG_VALUE == -1)
812 && ((reversed = reversed_comparison_code (op, NULL_RTX))
814 return simplify_gen_relational (reversed, mode, VOIDmode,
815 XEXP (op, 0), XEXP (op, 1));
817 /* (not (plus X -1)) can become (neg X). */
818 if (GET_CODE (op) == PLUS
819 && XEXP (op, 1) == constm1_rtx)
820 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
822 /* Similarly, (not (neg X)) is (plus X -1). */
823 if (GET_CODE (op) == NEG)
824 return plus_constant (XEXP (op, 0), -1);
826 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
827 if (GET_CODE (op) == XOR
828 && GET_CODE (XEXP (op, 1)) == CONST_INT
829 && (temp = simplify_unary_operation (NOT, mode,
832 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
835 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
836 operands other than 1, but that is not valid. We could do a
837 similar simplification for (not (lshiftrt C X)) where C is
838 just the sign bit, but this doesn't seem common enough to bother with. */
840 if (GET_CODE (op) == ASHIFT
841 && XEXP (op, 0) == const1_rtx)
843 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
844 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
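/* Worked example (added for illustration): in QImode with X = 2,
   (not (ashift 1 2)) is ~0b00000100 = 0b11111011, which is exactly
   (rotate 0b11111110 2); rotating the lone zero bit of ~1 tracks the
   lone one bit of (ashift 1 X) for every X.  */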
847 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
848 by reversing the comparison code if valid. */
849 if (STORE_FLAG_VALUE == -1
850 && GET_RTX_CLASS (GET_CODE (op)) == '<'
851 && (reversed = reversed_comparison_code (op, NULL_RTX))
853 return simplify_gen_relational (reversed, mode, VOIDmode,
854 XEXP (op, 0), XEXP (op, 1));
856 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
857 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
858 so we can perform the above simplification. */
860 if (STORE_FLAG_VALUE == -1
861 && GET_CODE (op) == ASHIFTRT
862 && GET_CODE (XEXP (op, 1)) == CONST_INT
863 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
864 return simplify_gen_relational (GE, mode, VOIDmode,
865 XEXP (op, 0), const0_rtx);
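/* Illustration (added): in SImode, (ashiftrt x 31) is -1 when x is
   negative and 0 otherwise; complementing gives 0 or -1 respectively,
   which with STORE_FLAG_VALUE == -1 is precisely (ge x 0).  */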
870 /* (neg (neg X)) == X. */
871 if (GET_CODE (op) == NEG)
874 /* (neg (plus X 1)) can become (not X). */
875 if (GET_CODE (op) == PLUS
876 && XEXP (op, 1) == const1_rtx)
877 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
879 /* Similarly, (neg (not X)) is (plus X 1). */
880 if (GET_CODE (op) == NOT)
881 return plus_constant (XEXP (op, 0), 1);
883 /* (neg (minus X Y)) can become (minus Y X). This transformation
884 isn't safe for modes with signed zeros, since if X and Y are
885 both +0, (minus Y X) is the same as (minus X Y). If the
886 rounding mode is towards +infinity (or -infinity) then the two
887 expressions will be rounded differently. */
888 if (GET_CODE (op) == MINUS
889 && !HONOR_SIGNED_ZEROS (mode)
890 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
891 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
894 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
895 if (GET_CODE (op) == PLUS
896 && !HONOR_SIGNED_ZEROS (mode)
897 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
899 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
900 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
903 /* (neg (mult A B)) becomes (mult (neg A) B).
904 This works even for floating-point values. */
905 if (GET_CODE (op) == MULT
906 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
908 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
909 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
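/* Illustration (added): if A is (const_int 3), (neg (mult a b))
   becomes (mult (const_int -3) b) once the inner NEG folds.  Negation
   merely flips the sign bit, so this is exact even for IEEE floats;
   only sign-dependent rounding modes are excluded above.  */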
912 /* NEG commutes with ASHIFT since it is multiplication. Only do
913 this if we can then eliminate the NEG (e.g., if the operand is a constant). */
915 if (GET_CODE (op) == ASHIFT)
917 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
920 return simplify_gen_binary (ASHIFT, mode, temp,
927 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
928 becomes just the MINUS if its mode is MODE. This allows
929 folding switch statements on machines using casesi (such as the VAX). */
931 if (GET_CODE (op) == TRUNCATE
932 && GET_MODE (XEXP (op, 0)) == mode
933 && GET_CODE (XEXP (op, 0)) == MINUS
934 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
935 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
938 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
939 if (! POINTERS_EXTEND_UNSIGNED
940 && mode == Pmode && GET_MODE (op) == ptr_mode
942 || (GET_CODE (op) == SUBREG
943 && GET_CODE (SUBREG_REG (op)) == REG
944 && REG_POINTER (SUBREG_REG (op))
945 && GET_MODE (SUBREG_REG (op)) == Pmode)))
946 return convert_memory_address (Pmode, op);
950 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
952 if (POINTERS_EXTEND_UNSIGNED > 0
953 && mode == Pmode && GET_MODE (op) == ptr_mode
955 || (GET_CODE (op) == SUBREG
956 && GET_CODE (SUBREG_REG (op)) == REG
957 && REG_POINTER (SUBREG_REG (op))
958 && GET_MODE (SUBREG_REG (op)) == Pmode)))
959 return convert_memory_address (Pmode, op);
971 /* Subroutine of simplify_associative_operation. Return true if rtx OP
972 is a suitable integer or floating point immediate constant. */
974 associative_constant_p (rtx op)
976 if (GET_CODE (op) == CONST_INT
977 || GET_CODE (op) == CONST_DOUBLE)
979 op = avoid_constant_pool_reference (op);
980 return GET_CODE (op) == CONST_INT
981 || GET_CODE (op) == CONST_DOUBLE;
984 /* Subroutine of simplify_binary_operation to simplify an associative
985 binary operation CODE with result mode MODE, operating on OP0 and OP1.
986 Return 0 if no simplification is possible. */
988 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
993 /* Simplify (x op c1) op c2 as x op (c1 op c2). */
994 if (GET_CODE (op0) == code
995 && associative_constant_p (op1)
996 && associative_constant_p (XEXP (op0, 1)))
998 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1001 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1004 /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2). */
1005 if (GET_CODE (op0) == code
1006 && GET_CODE (op1) == code
1007 && associative_constant_p (XEXP (op0, 1))
1008 && associative_constant_p (XEXP (op1, 1)))
1010 rtx c = simplify_binary_operation (code, mode,
1011 XEXP (op0, 1), XEXP (op1, 1));
1014 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1015 return simplify_gen_binary (code, mode, tem, c);
1018 /* Canonicalize (x op c) op y as (x op y) op c. */
1019 if (GET_CODE (op0) == code
1020 && associative_constant_p (XEXP (op0, 1)))
1022 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1023 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1026 /* Canonicalize x op (y op c) as (x op y) op c. */
1027 if (GET_CODE (op1) == code
1028 && associative_constant_p (XEXP (op1, 1)))
1030 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1031 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
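/* Worked example (added for illustration): (plus (plus x 4)
   (plus y 5)) matches the second transformation above and becomes
   (plus (plus x y) 9), the two constants folding in the recursive
   simplify_binary_operation call.  */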
1037 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1038 and OP1. Return 0 if no simplification is possible.
1040 Don't use this for relational operations such as EQ or LT.
1041 Use simplify_relational_operation instead. */
1043 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1046 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1048 unsigned int width = GET_MODE_BITSIZE (mode);
1050 rtx trueop0 = avoid_constant_pool_reference (op0);
1051 rtx trueop1 = avoid_constant_pool_reference (op1);
1053 /* Relational operations don't work here. We must know the mode
1054 of the operands in order to do the comparison correctly.
1055 Assuming a full word can give incorrect results.
1056 Consider comparing 128 with -128 in QImode. */
1058 if (GET_RTX_CLASS (code) == '<')
1061 /* Make sure the constant is second. */
1062 if (GET_RTX_CLASS (code) == 'c'
1063 && swap_commutative_operands_p (trueop0, trueop1))
1065 tem = op0, op0 = op1, op1 = tem;
1066 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1069 if (VECTOR_MODE_P (mode)
1070 && GET_CODE (trueop0) == CONST_VECTOR
1071 && GET_CODE (trueop1) == CONST_VECTOR)
1073 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1074 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1075 enum machine_mode op0mode = GET_MODE (trueop0);
1076 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1077 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1078 enum machine_mode op1mode = GET_MODE (trueop1);
1079 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1080 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1081 rtvec v = rtvec_alloc (n_elts);
1084 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1087 for (i = 0; i < n_elts; i++)
1089 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1090 CONST_VECTOR_ELT (trueop0, i),
1091 CONST_VECTOR_ELT (trueop1, i));
1094 RTVEC_ELT (v, i) = x;
1097 return gen_rtx_CONST_VECTOR (mode, v);
1100 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1101 && GET_CODE (trueop0) == CONST_DOUBLE
1102 && GET_CODE (trueop1) == CONST_DOUBLE
1103 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1105 REAL_VALUE_TYPE f0, f1, value;
1107 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1108 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1109 f0 = real_value_truncate (mode, f0);
1110 f1 = real_value_truncate (mode, f1);
1112 if (HONOR_SNANS (mode)
1113 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1117 && REAL_VALUES_EQUAL (f1, dconst0)
1118 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1121 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1123 value = real_value_truncate (mode, value);
1124 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1127 /* We can fold some multi-word operations. */
1128 if (GET_MODE_CLASS (mode) == MODE_INT
1129 && width == HOST_BITS_PER_WIDE_INT * 2
1130 && (GET_CODE (trueop0) == CONST_DOUBLE
1131 || GET_CODE (trueop0) == CONST_INT)
1132 && (GET_CODE (trueop1) == CONST_DOUBLE
1133 || GET_CODE (trueop1) == CONST_INT))
1135 unsigned HOST_WIDE_INT l1, l2, lv;
1136 HOST_WIDE_INT h1, h2, hv;
1138 if (GET_CODE (trueop0) == CONST_DOUBLE)
1139 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1141 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1143 if (GET_CODE (trueop1) == CONST_DOUBLE)
1144 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1146 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1151 /* A - B == A + (-B). */
1152 neg_double (l2, h2, &lv, &hv);
1155 /* Fall through.... */
1158 add_double (l1, h1, l2, h2, &lv, &hv);
1162 mul_double (l1, h1, l2, h2, &lv, &hv);
1165 case DIV: case MOD: case UDIV: case UMOD:
1166 /* We'd need to include tree.h to do this and it doesn't seem worth it. */
1171 lv = l1 & l2, hv = h1 & h2;
1175 lv = l1 | l2, hv = h1 | h2;
1179 lv = l1 ^ l2, hv = h1 ^ h2;
1185 && ((unsigned HOST_WIDE_INT) l1
1186 < (unsigned HOST_WIDE_INT) l2)))
1195 && ((unsigned HOST_WIDE_INT) l1
1196 > (unsigned HOST_WIDE_INT) l2)))
1203 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1205 && ((unsigned HOST_WIDE_INT) l1
1206 < (unsigned HOST_WIDE_INT) l2)))
1213 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1215 && ((unsigned HOST_WIDE_INT) l1
1216 > (unsigned HOST_WIDE_INT) l2)))
1222 case LSHIFTRT: case ASHIFTRT:
1224 case ROTATE: case ROTATERT:
1225 #ifdef SHIFT_COUNT_TRUNCATED
1226 if (SHIFT_COUNT_TRUNCATED)
1227 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1230 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1233 if (code == LSHIFTRT || code == ASHIFTRT)
1234 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1236 else if (code == ASHIFT)
1237 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1238 else if (code == ROTATE)
1239 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1240 else /* code == ROTATERT */
1241 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1248 return immed_double_const (lv, hv, mode);
1251 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1252 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1254 /* Even if we can't compute a constant result,
1255 there are some cases worth simplifying. */
1260 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1261 when x is NaN, infinite, or finite and nonzero. They aren't
1262 when x is -0 and the rounding mode is not towards -infinity,
1263 since (-0) + 0 is then 0. */
1264 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1267 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1268 transformations are safe even for IEEE. */
1269 if (GET_CODE (op0) == NEG)
1270 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1271 else if (GET_CODE (op1) == NEG)
1272 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1274 /* (~a) + 1 -> -a */
1275 if (INTEGRAL_MODE_P (mode)
1276 && GET_CODE (op0) == NOT
1277 && trueop1 == const1_rtx)
1278 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1280 /* Handle both-operands-constant cases. We can only add
1281 CONST_INTs to constants since the sum of relocatable symbols
1282 can't be handled by most assemblers. Don't add CONST_INT
1283 to CONST_INT since overflow won't be computed properly if wider
1284 than HOST_BITS_PER_WIDE_INT. */
1286 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1287 && GET_CODE (op1) == CONST_INT)
1288 return plus_constant (op0, INTVAL (op1));
1289 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1290 && GET_CODE (op0) == CONST_INT)
1291 return plus_constant (op1, INTVAL (op0));
1293 /* See if this is something like X * C - X or vice versa or
1294 if the multiplication is written as a shift. If so, we can
1295 distribute and make a new multiply, shift, or maybe just
1296 have X (if C is 2 in the example above). But don't make a
1297 real multiply if we didn't have one before. */
1299 if (! FLOAT_MODE_P (mode))
1301 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1302 rtx lhs = op0, rhs = op1;
1305 if (GET_CODE (lhs) == NEG)
1306 coeff0 = -1, lhs = XEXP (lhs, 0);
1307 else if (GET_CODE (lhs) == MULT
1308 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1310 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1313 else if (GET_CODE (lhs) == ASHIFT
1314 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1315 && INTVAL (XEXP (lhs, 1)) >= 0
1316 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1318 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1319 lhs = XEXP (lhs, 0);
1322 if (GET_CODE (rhs) == NEG)
1323 coeff1 = -1, rhs = XEXP (rhs, 0);
1324 else if (GET_CODE (rhs) == MULT
1325 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1327 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1330 else if (GET_CODE (rhs) == ASHIFT
1331 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1332 && INTVAL (XEXP (rhs, 1)) >= 0
1333 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1335 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1336 rhs = XEXP (rhs, 0);
1339 if (rtx_equal_p (lhs, rhs))
1341 tem = simplify_gen_binary (MULT, mode, lhs,
1342 GEN_INT (coeff0 + coeff1));
1343 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
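/* Worked example (added for illustration): x*3 + x matches with
   coeff0 = 3 and coeff1 = 1, giving x*4; (ashift x 2) + x gives
   coeff0 = 4 and hence x*5.  Per the check above, a result that
   introduces a MULT where none existed is discarded.  */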
1347 /* If one of the operands is a PLUS or a MINUS, see if we can
1348 simplify this by the associative law.
1349 Don't use the associative law for floating point.
1350 The inaccuracy makes it nonassociative,
1351 and subtle programs can break if operations are associated. */
1353 if (INTEGRAL_MODE_P (mode)
1354 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1355 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1356 || (GET_CODE (op0) == CONST
1357 && GET_CODE (XEXP (op0, 0)) == PLUS)
1358 || (GET_CODE (op1) == CONST
1359 && GET_CODE (XEXP (op1, 0)) == PLUS))
1360 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1363 /* Reassociate floating point addition only when the user
1364 specifies unsafe math optimizations. */
1365 if (FLOAT_MODE_P (mode)
1366 && flag_unsafe_math_optimizations)
1368 tem = simplify_associative_operation (code, mode, op0, op1);
1376 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1377 using cc0, in which case we want to leave it as a COMPARE
1378 so we can distinguish it from a register-register-copy.
1380 In IEEE floating point, x-0 is not the same as x. */
1382 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1383 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1384 && trueop1 == CONST0_RTX (mode))
1388 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1389 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1390 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1391 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1393 rtx xop00 = XEXP (op0, 0);
1394 rtx xop10 = XEXP (op1, 0);
1397 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1399 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1400 && GET_MODE (xop00) == GET_MODE (xop10)
1401 && REGNO (xop00) == REGNO (xop10)
1402 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1403 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1410 /* We can't assume x-x is 0 even with non-IEEE floating point,
1411 but since it is zero except in very strange circumstances, we
1412 will treat it as zero with -funsafe-math-optimizations. */
1413 if (rtx_equal_p (trueop0, trueop1)
1414 && ! side_effects_p (op0)
1415 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1416 return CONST0_RTX (mode);
1418 /* Change subtraction from zero into negation. (0 - x) is the
1419 same as -x when x is NaN, infinite, or finite and nonzero.
1420 But if the mode has signed zeros, and does not round towards
1421 -infinity, then 0 - 0 is 0, not -0. */
1422 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1423 return simplify_gen_unary (NEG, mode, op1, mode);
1425 /* (-1 - a) is ~a. */
1426 if (trueop0 == constm1_rtx)
1427 return simplify_gen_unary (NOT, mode, op1, mode);
1429 /* Subtracting 0 has no effect unless the mode has signed zeros
1430 and supports rounding towards -infinity. In such a case, 0 - 0 is -0. */
1432 if (!(HONOR_SIGNED_ZEROS (mode)
1433 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1434 && trueop1 == CONST0_RTX (mode))
1437 /* See if this is something like X * C - X or vice versa or
1438 if the multiplication is written as a shift. If so, we can
1439 distribute and make a new multiply, shift, or maybe just
1440 have X (if C is 2 in the example above). But don't make a
1441 real multiply if we didn't have one before. */
1443 if (! FLOAT_MODE_P (mode))
1445 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1446 rtx lhs = op0, rhs = op1;
1449 if (GET_CODE (lhs) == NEG)
1450 coeff0 = -1, lhs = XEXP (lhs, 0);
1451 else if (GET_CODE (lhs) == MULT
1452 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1454 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1457 else if (GET_CODE (lhs) == ASHIFT
1458 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1459 && INTVAL (XEXP (lhs, 1)) >= 0
1460 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1462 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1463 lhs = XEXP (lhs, 0);
1466 if (GET_CODE (rhs) == NEG)
1467 coeff1 = -1, rhs = XEXP (rhs, 0);
1468 else if (GET_CODE (rhs) == MULT
1469 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1471 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1474 else if (GET_CODE (rhs) == ASHIFT
1475 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1476 && INTVAL (XEXP (rhs, 1)) >= 0
1477 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1479 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1480 rhs = XEXP (rhs, 0);
1483 if (rtx_equal_p (lhs, rhs))
1485 tem = simplify_gen_binary (MULT, mode, lhs,
1486 GEN_INT (coeff0 - coeff1));
1487 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1491 /* (a - (-b)) -> (a + b). True even for IEEE. */
1492 if (GET_CODE (op1) == NEG)
1493 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1495 /* If one of the operands is a PLUS or a MINUS, see if we can
1496 simplify this by the associative law.
1497 Don't use the associative law for floating point.
1498 The inaccuracy makes it nonassociative,
1499 and subtle programs can break if operations are associated. */
1501 if (INTEGRAL_MODE_P (mode)
1502 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1503 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1504 || (GET_CODE (op0) == CONST
1505 && GET_CODE (XEXP (op0, 0)) == PLUS)
1506 || (GET_CODE (op1) == CONST
1507 && GET_CODE (XEXP (op1, 0)) == PLUS))
1508 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1511 /* Don't let a relocatable value get a negative coeff. */
1512 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1513 return simplify_gen_binary (PLUS, mode,
1515 neg_const_int (mode, op1));
1517 /* (x - (x & y)) -> (x & ~y) */
1518 if (GET_CODE (op1) == AND)
1520 if (rtx_equal_p (op0, XEXP (op1, 0)))
1522 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1523 GET_MODE (XEXP (op1, 1)));
1524 return simplify_gen_binary (AND, mode, op0, tem);
1526 if (rtx_equal_p (op0, XEXP (op1, 1)))
1528 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1529 GET_MODE (XEXP (op1, 0)));
1530 return simplify_gen_binary (AND, mode, op0, tem);
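/* Worked example (added for illustration): with x = 0b1101 and
   y = 0b1011, x & y = 0b1001 and x - 0b1001 = 0b0100 = x & ~y.
   Subtracting a submask of x just clears those bits, so no borrow
   can occur.  */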
1536 if (trueop1 == constm1_rtx)
1537 return simplify_gen_unary (NEG, mode, op0, mode);
1539 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1540 x is NaN, since x * 0 is then also NaN. Nor is it valid
1541 when the mode has signed zeros, since multiplying a negative
1542 number by 0 will give -0, not 0. */
1543 if (!HONOR_NANS (mode)
1544 && !HONOR_SIGNED_ZEROS (mode)
1545 && trueop1 == CONST0_RTX (mode)
1546 && ! side_effects_p (op0))
1549 /* In IEEE floating point, x*1 is not equivalent to x for signalling NaNs. */
1551 if (!HONOR_SNANS (mode)
1552 && trueop1 == CONST1_RTX (mode))
1555 /* Convert multiply by constant power of two into shift unless
1556 we are still generating RTL. This test is a kludge. */
1557 if (GET_CODE (trueop1) == CONST_INT
1558 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1559 /* If the mode is larger than the host word size, and the
1560 uppermost bit is set, then this isn't a power of two due
1561 to implicit sign extension. */
1562 && (width <= HOST_BITS_PER_WIDE_INT
1563 || val != HOST_BITS_PER_WIDE_INT - 1)
1564 && ! rtx_equal_function_value_matters)
1565 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
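/* Illustration (added): x * 8 becomes (ashift x 3).  The host-word
   guard above matters because a CONST_INT with only the top bit of
   the host word set reads as a negative value, not a power of two.  */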
1567 /* x*2 is x+x and x*(-1) is -x */
1568 if (GET_CODE (trueop1) == CONST_DOUBLE
1569 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1570 && GET_MODE (op0) == mode)
1573 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1575 if (REAL_VALUES_EQUAL (d, dconst2))
1576 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1578 if (REAL_VALUES_EQUAL (d, dconstm1))
1579 return simplify_gen_unary (NEG, mode, op0, mode);
1582 /* Reassociate multiplication, but for floating point MULTs
1583 only when the user specifies unsafe math optimizations. */
1584 if (! FLOAT_MODE_P (mode)
1585 || flag_unsafe_math_optimizations)
1587 tem = simplify_associative_operation (code, mode, op0, op1);
1594 if (trueop1 == const0_rtx)
1596 if (GET_CODE (trueop1) == CONST_INT
1597 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1598 == GET_MODE_MASK (mode)))
1600 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1602 /* A | (~A) -> -1 */
1603 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1604 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1605 && ! side_effects_p (op0)
1606 && GET_MODE_CLASS (mode) != MODE_CC)
1608 tem = simplify_associative_operation (code, mode, op0, op1);
1614 if (trueop1 == const0_rtx)
1616 if (GET_CODE (trueop1) == CONST_INT
1617 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1618 == GET_MODE_MASK (mode)))
1619 return simplify_gen_unary (NOT, mode, op0, mode);
1620 if (trueop0 == trueop1 && ! side_effects_p (op0)
1621 && GET_MODE_CLASS (mode) != MODE_CC)
1623 tem = simplify_associative_operation (code, mode, op0, op1);
1629 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1631 if (GET_CODE (trueop1) == CONST_INT
1632 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1633 == GET_MODE_MASK (mode)))
1635 if (trueop0 == trueop1 && ! side_effects_p (op0)
1636 && GET_MODE_CLASS (mode) != MODE_CC)
1639 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1640 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1641 && ! side_effects_p (op0)
1642 && GET_MODE_CLASS (mode) != MODE_CC)
1644 tem = simplify_associative_operation (code, mode, op0, op1);
1650 /* Convert divide by power of two into shift (divide by 1 handled below). */
1652 if (GET_CODE (trueop1) == CONST_INT
1653 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1654 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1656 /* Fall through.... */
1659 if (trueop1 == CONST1_RTX (mode))
1661 /* On some platforms DIV uses narrower mode than its operands. */
1663 rtx x = gen_lowpart_common (mode, op0);
1666 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1667 return gen_lowpart_SUBREG (mode, op0);
1672 /* Maybe change 0 / x to 0. This transformation isn't safe for
1673 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1674 Nor is it safe for modes with signed zeros, since dividing
1675 0 by a negative number gives -0, not 0. */
1676 if (!HONOR_NANS (mode)
1677 && !HONOR_SIGNED_ZEROS (mode)
1678 && trueop0 == CONST0_RTX (mode)
1679 && ! side_effects_p (op1))
1682 /* Change division by a constant into multiplication. Only do
1683 this with -funsafe-math-optimizations. */
1684 else if (GET_CODE (trueop1) == CONST_DOUBLE
1685 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1686 && trueop1 != CONST0_RTX (mode)
1687 && flag_unsafe_math_optimizations)
1690 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1692 if (! REAL_VALUES_EQUAL (d, dconst0))
1694 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1695 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1696 return simplify_gen_binary (MULT, mode, op0, tem);
1702 /* Handle modulus by power of two (mod with 1 handled below). */
1703 if (GET_CODE (trueop1) == CONST_INT
1704 && exact_log2 (INTVAL (trueop1)) > 0)
1705 return simplify_gen_binary (AND, mode, op0,
1706 GEN_INT (INTVAL (op1) - 1));
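/* Illustration (added): unsigned x % 8 becomes (and x 7); for a
   power-of-two modulus the remainder is just the low log2 bits.  */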
1708 /* Fall through.... */
1711 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1712 && ! side_effects_p (op0) && ! side_effects_p (op1))
1719 /* Rotating ~0 always results in ~0. */
1720 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1721 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1722 && ! side_effects_p (op1))
1725 /* Fall through.... */
1729 if (trueop1 == const0_rtx)
1731 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1736 if (width <= HOST_BITS_PER_WIDE_INT
1737 && GET_CODE (trueop1) == CONST_INT
1738 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1739 && ! side_effects_p (op0))
1741 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1743 tem = simplify_associative_operation (code, mode, op0, op1);
1749 if (width <= HOST_BITS_PER_WIDE_INT
1750 && GET_CODE (trueop1) == CONST_INT
1751 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1752 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1753 && ! side_effects_p (op0))
1755 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1757 tem = simplify_associative_operation (code, mode, op0, op1);
1763 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1765 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1767 tem = simplify_associative_operation (code, mode, op0, op1);
1773 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1775 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1777 tem = simplify_associative_operation (code, mode, op0, op1);
1786 /* ??? There are simplifications that can be done. */
1790 if (!VECTOR_MODE_P (mode))
1792 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1794 != GET_MODE_INNER (GET_MODE (trueop0)))
1795 || GET_CODE (trueop1) != PARALLEL
1796 || XVECLEN (trueop1, 0) != 1
1797 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1800 if (GET_CODE (trueop0) == CONST_VECTOR)
1801 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1805 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1806 || (GET_MODE_INNER (mode)
1807 != GET_MODE_INNER (GET_MODE (trueop0)))
1808 || GET_CODE (trueop1) != PARALLEL)
1811 if (GET_CODE (trueop0) == CONST_VECTOR)
1813 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1814 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1815 rtvec v = rtvec_alloc (n_elts);
1818 if (XVECLEN (trueop1, 0) != (int) n_elts)
1820 for (i = 0; i < n_elts; i++)
1822 rtx x = XVECEXP (trueop1, 0, i);
1824 if (GET_CODE (x) != CONST_INT)
1826 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1829 return gen_rtx_CONST_VECTOR (mode, v);
1835 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1836 ? GET_MODE (trueop0)
1837 : GET_MODE_INNER (mode));
1838 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1839 ? GET_MODE (trueop1)
1840 : GET_MODE_INNER (mode));
1842 if (!VECTOR_MODE_P (mode)
1843 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1844 != GET_MODE_SIZE (mode)))
1847 if ((VECTOR_MODE_P (op0_mode)
1848 && (GET_MODE_INNER (mode)
1849 != GET_MODE_INNER (op0_mode)))
1850 || (!VECTOR_MODE_P (op0_mode)
1851 && GET_MODE_INNER (mode) != op0_mode))
1854 if ((VECTOR_MODE_P (op1_mode)
1855 && (GET_MODE_INNER (mode)
1856 != GET_MODE_INNER (op1_mode)))
1857 || (!VECTOR_MODE_P (op1_mode)
1858 && GET_MODE_INNER (mode) != op1_mode))
1861 if ((GET_CODE (trueop0) == CONST_VECTOR
1862 || GET_CODE (trueop0) == CONST_INT
1863 || GET_CODE (trueop0) == CONST_DOUBLE)
1864 && (GET_CODE (trueop1) == CONST_VECTOR
1865 || GET_CODE (trueop1) == CONST_INT
1866 || GET_CODE (trueop1) == CONST_DOUBLE))
1868 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1869 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1870 rtvec v = rtvec_alloc (n_elts);
1872 unsigned in_n_elts = 1;
1874 if (VECTOR_MODE_P (op0_mode))
1875 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1876 for (i = 0; i < n_elts; i++)
1880 if (!VECTOR_MODE_P (op0_mode))
1881 RTVEC_ELT (v, i) = trueop0;
1883 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
1887 if (!VECTOR_MODE_P (op1_mode))
1888 RTVEC_ELT (v, i) = trueop1;
1890 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
1895 return gen_rtx_CONST_VECTOR (mode, v);
1907 /* Get the integer argument values in two forms:
1908 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1910 arg0 = INTVAL (trueop0);
1911 arg1 = INTVAL (trueop1);
1913 if (width < HOST_BITS_PER_WIDE_INT)
1915 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1916 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1919 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1920 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1923 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1924 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1932 /* Compute the value of the arithmetic. */
1937 val = arg0s + arg1s;
1941 val = arg0s - arg1s;
1945 val = arg0s * arg1s;
1950 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1953 val = arg0s / arg1s;
1958 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1961 val = arg0s % arg1s;
1966 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1969 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1974 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1977 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1993 /* If shift count is undefined, don't fold it; let the machine do
1994 what it wants. But truncate it if the machine will do that. */
1998 #ifdef SHIFT_COUNT_TRUNCATED
1999 if (SHIFT_COUNT_TRUNCATED)
2003 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2010 #ifdef SHIFT_COUNT_TRUNCATED
2011 if (SHIFT_COUNT_TRUNCATED)
2015 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2022 #ifdef SHIFT_COUNT_TRUNCATED
2023 if (SHIFT_COUNT_TRUNCATED)
2027 val = arg0s >> arg1;
2029 /* Bootstrap compiler may not have sign extended the right shift.
2030 Manually extend the sign to ensure bootstrap cc matches gcc. */
2031 if (arg0s < 0 && arg1 > 0)
2032 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2041 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2042 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2050 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2051 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2055 /* Do nothing here. */
2059 val = arg0s <= arg1s ? arg0s : arg1s;
2063 val = ((unsigned HOST_WIDE_INT) arg0
2064 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2068 val = arg0s > arg1s ? arg0s : arg1s;
2072 val = ((unsigned HOST_WIDE_INT) arg0
2073 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2080 /* ??? There are simplifications that can be done. */
2087 val = trunc_int_for_mode (val, mode);
2089 return GEN_INT (val);
2092 /* Simplify a PLUS or MINUS, at least one of whose operands may be another PLUS or MINUS.
2095 Rather than test for specific cases, we do this by a brute-force method
2096 and do all possible simplifications until no more changes occur. Then
2097 we rebuild the operation.
2099 If FORCE is true, then always generate the rtx. This is used to
2100 canonicalize stuff emitted from simplify_gen_binary. Note that this
2101 can still fail if the rtx is too complex. It won't fail just because
2102 the result is not 'simpler' than the input, however. */
2104 struct simplify_plus_minus_op_data
2111 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2113 const struct simplify_plus_minus_op_data *d1 = p1;
2114 const struct simplify_plus_minus_op_data *d2 = p2;
2116 return (commutative_operand_precedence (d2->op)
2117 - commutative_operand_precedence (d1->op));
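/* Note (added): the comparison above sorts higher-precedence operands
   (per commutative_operand_precedence) toward the front, so constants
   migrate to the end of the array; simplify_plus_minus relies on the
   trailing position of the CONST_INT below.  */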
2121 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2124 struct simplify_plus_minus_op_data ops[8];
2126 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2127 int first, negate, changed;
2130 memset (ops, 0, sizeof ops);
2132 /* Set up the two operands and then expand them until nothing has been
2133 changed. If we run out of room in our array, give up; this should
2134 almost never happen. */
2139 ops[1].neg = (code == MINUS);
2145 for (i = 0; i < n_ops; i++)
2147 rtx this_op = ops[i].op;
2148 int this_neg = ops[i].neg;
2149 enum rtx_code this_code = GET_CODE (this_op);
2158 ops[n_ops].op = XEXP (this_op, 1);
2159 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2162 ops[i].op = XEXP (this_op, 0);
2168 ops[i].op = XEXP (this_op, 0);
2169 ops[i].neg = ! this_neg;
2175 && GET_CODE (XEXP (this_op, 0)) == PLUS
2176 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2177 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2179 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2180 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2181 ops[n_ops].neg = this_neg;
2189 /* ~a -> (-a - 1) */
2192 ops[n_ops].op = constm1_rtx;
2193 ops[n_ops++].neg = this_neg;
2194 ops[i].op = XEXP (this_op, 0);
2195 ops[i].neg = !this_neg;
2203 ops[i].op = neg_const_int (mode, this_op);
2216 /* If we only have two operands, we can't do anything. */
2217 if (n_ops <= 2 && !force)
2220 /* Count the number of CONSTs we didn't split above. */
2221 for (i = 0; i < n_ops; i++)
2222 if (GET_CODE (ops[i].op) == CONST)
2225 /* Now simplify each pair of operands until nothing changes. The first
2226 time through just simplify constants against each other. */
2233 for (i = 0; i < n_ops - 1; i++)
2234 for (j = i + 1; j < n_ops; j++)
2236 rtx lhs = ops[i].op, rhs = ops[j].op;
2237 int lneg = ops[i].neg, rneg = ops[j].neg;
2239 if (lhs != 0 && rhs != 0
2240 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2242 enum rtx_code ncode = PLUS;
2248 tem = lhs, lhs = rhs, rhs = tem;
2250 else if (swap_commutative_operands_p (lhs, rhs))
2251 tem = lhs, lhs = rhs, rhs = tem;
2253 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2255 /* Reject "simplifications" that just wrap the two
2256 arguments in a CONST. Failure to do so can result
2257 in infinite recursion with simplify_binary_operation
2258 when it calls us to simplify CONST operations. */
2260 && ! (GET_CODE (tem) == CONST
2261 && GET_CODE (XEXP (tem, 0)) == ncode
2262 && XEXP (XEXP (tem, 0), 0) == lhs
2263 && XEXP (XEXP (tem, 0), 1) == rhs)
2264 /* Don't allow -x + -1 -> ~x simplifications in the
2265 first pass. This allows us the chance to combine
2266 the -1 with other constants. */
2268 && GET_CODE (tem) == NOT
2269 && XEXP (tem, 0) == rhs))
2272 if (GET_CODE (tem) == NEG)
2273 tem = XEXP (tem, 0), lneg = !lneg;
2274 if (GET_CODE (tem) == CONST_INT && lneg)
2275 tem = neg_const_int (mode, tem), lneg = 0;
2279 ops[j].op = NULL_RTX;
2289 /* Pack all the operands to the lower-numbered entries. */
2290 for (i = 0, j = 0; j < n_ops; j++)
2295 /* Sort the operations based on swap_commutative_operands_p. */
2296 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2298 /* We suppressed creation of trivial CONST expressions in the
2299 combination loop to avoid recursion. Create one manually now.
2300 The combination loop should have ensured that there is exactly
2301 one CONST_INT, and the sort will have ensured that it is last
2302 in the array and that any other constant will be next-to-last. */
2305 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2306 && CONSTANT_P (ops[n_ops - 2].op))
2308 rtx value = ops[n_ops - 1].op;
2309 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2310 value = neg_const_int (mode, value);
2311 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2315 /* Count the number of CONSTs that we generated. */
2317 for (i = 0; i < n_ops; i++)
2318 if (GET_CODE (ops[i].op) == CONST)
2321 /* Give up if we didn't reduce the number of operands we had. Make
2322 sure we count a CONST as two operands. If we have the same
2323 number of operands, but have made more CONSTs than before, this
2324 is also an improvement, so accept it. */
2326 && (n_ops + n_consts > input_ops
2327 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2330 /* Put a non-negated operand first. If there aren't any, make all
2331 operands positive and negate the whole thing later. */
2334 for (i = 0; i < n_ops && ops[i].neg; i++)
2338 for (i = 0; i < n_ops; i++)
2350 /* Now make the result by performing the requested operations. */
2352 for (i = 1; i < n_ops; i++)
2353 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2354 mode, result, ops[i].op);
2356 return negate ? gen_rtx_NEG (mode, result) : result;
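/* Worked example (added for illustration): for (x + 4) - (y + 4) the
   expansion loop collects x, 4, -y and -4; the two constants cancel
   during the combination pass and the rebuild emits (minus x y).  */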
2359 /* Like simplify_binary_operation except used for relational operators.
2360 MODE is the mode of the operands, not that of the result. If MODE
2361 is VOIDmode, both operands must also be VOIDmode and we compare the
2362 operands in "infinite precision".
2364 If no simplification is possible, this function returns zero. Otherwise,
2365 it returns either const_true_rtx or const0_rtx. */
2368 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2371 int equal, op0lt, op0ltu, op1lt, op1ltu;
2376 if (mode == VOIDmode
2377 && (GET_MODE (op0) != VOIDmode
2378 || GET_MODE (op1) != VOIDmode))
2381 /* If op0 is a compare, extract the comparison arguments from it. */
2382 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2383 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2385 trueop0 = avoid_constant_pool_reference (op0);
2386 trueop1 = avoid_constant_pool_reference (op1);
2388 /* We can't simplify MODE_CC values since we don't know what the
2389 actual comparison is. */
2390 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2393 /* Make sure the constant is second. */
2394 if (swap_commutative_operands_p (trueop0, trueop1))
2396 tem = op0, op0 = op1, op1 = tem;
2397 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2398 code = swap_condition (code);
2401 /* For integer comparisons of A and B maybe we can simplify A - B and can
2402 then simplify a comparison of that with zero. If A and B are both either
2403 a register or a CONST_INT, this can't help; testing for these cases will
2404 prevent infinite recursion here and speed things up.
2406 If CODE is an unsigned comparison, then we can never do this optimization,
2407 because it gives an incorrect result if the subtraction wraps around zero.
2408 ANSI C defines unsigned operations such that they never overflow, and
2409 thus such cases cannot be ignored. */
2411 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2412 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2413 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2414 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2415 && code != GTU && code != GEU && code != LTU && code != LEU)
2416 return simplify_relational_operation (signed_condition (code),
2417 mode, tem, const0_rtx);
2419 if (flag_unsafe_math_optimizations && code == ORDERED)
2420 return const_true_rtx;
2422 if (flag_unsafe_math_optimizations && code == UNORDERED)
2423 return const0_rtx;
2425 /* For modes without NaNs, if the two operands are equal, we know the
2426 result, unless they have side effects.  */
2427 if (! HONOR_NANS (GET_MODE (trueop0))
2428 && rtx_equal_p (trueop0, trueop1)
2429 && ! side_effects_p (trueop0))
2430 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2432 /* If the operands are floating-point constants, see if we can fold
2433 the result.  */
2434 else if (GET_CODE (trueop0) == CONST_DOUBLE
2435 && GET_CODE (trueop1) == CONST_DOUBLE
2436 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2438 REAL_VALUE_TYPE d0, d1;
2440 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2441 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2443 /* Comparisons are unordered iff at least one of the values is NaN. */
2444 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2445 switch (code)
2446 {
2447 case UNEQ:
2448 case UNLT:
2449 case UNGT:
2450 case UNLE:
2451 case UNGE:
2452 case NE:
2453 case UNORDERED:
2454 return const_true_rtx;
2455 case EQ:
2456 case LT:
2457 case GT:
2458 case LE:
2459 case GE:
2460 case LTGT:
2461 case ORDERED:
2462 return const0_rtx;
2463 default:
2464 return 0;
2465 }
2467 equal = REAL_VALUES_EQUAL (d0, d1);
2468 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2469 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2472 /* Otherwise, see if the operands are both integers. */
2473 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2474 && (GET_CODE (trueop0) == CONST_DOUBLE
2475 || GET_CODE (trueop0) == CONST_INT)
2476 && (GET_CODE (trueop1) == CONST_DOUBLE
2477 || GET_CODE (trueop1) == CONST_INT))
2478 {
2479 int width = GET_MODE_BITSIZE (mode);
2480 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2481 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2483 /* Get the two words comprising each integer constant. */
2484 if (GET_CODE (trueop0) == CONST_DOUBLE)
2485 {
2486 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2487 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2488 }
2489 else
2490 {
2491 l0u = l0s = INTVAL (trueop0);
2492 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2493 }
2495 if (GET_CODE (trueop1) == CONST_DOUBLE)
2496 {
2497 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2498 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2499 }
2500 else
2501 {
2502 l1u = l1s = INTVAL (trueop1);
2503 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2504 }
2506 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2507 we have to sign or zero-extend the values. */
2508 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2510 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2511 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2513 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2514 l0s |= ((HOST_WIDE_INT) (-1) << width);
2516 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2517 l1s |= ((HOST_WIDE_INT) (-1) << width);
2519 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2520 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2522 equal = (h0u == h1u && l0u == l1u);
2523 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2524 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2525 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2526 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2527 }
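/* Worked example (editor's addition, illustrative): on a 32-bit host,
   comparing the DImode constants -1 = (high, low) = (0xffffffff,
   0xffffffff) and 1 = (0, 1) gives equal = 0, op0lt = 1 (signed, since
   h0s < h1s), but op0ltu = 0 (unsigned, since h0u = 0xffffffff > 0).  */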
2529 /* Otherwise, there are some code-specific tests we can make. */
2530 else
2531 {
2532 switch (code)
2533 {
2534 case EQ:
2535 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2536 return const0_rtx;
2537 break;
2539 case NE:
2540 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2541 return const_true_rtx;
2542 break;
2544 case GEU:
2545 /* Unsigned values are never negative.  */
2546 if (trueop1 == const0_rtx)
2547 return const_true_rtx;
2548 break;
2550 case LTU:
2551 if (trueop1 == const0_rtx)
2552 return const0_rtx;
2553 break;
2555 case LEU:
2556 /* Unsigned values are never greater than the largest
2557 unsigned value.  */
2558 if (GET_CODE (trueop1) == CONST_INT
2559 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2560 && INTEGRAL_MODE_P (mode))
2561 return const_true_rtx;
2562 break;
2564 case GTU:
2565 if (GET_CODE (trueop1) == CONST_INT
2566 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2567 && INTEGRAL_MODE_P (mode))
2568 return const0_rtx;
2569 break;
2571 case LT:
2572 /* Optimize abs(x) < 0.0.  */
2573 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2574 {
2575 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2576 : trueop0;
2577 if (GET_CODE (tem) == ABS)
2578 return const0_rtx;
2579 }
2580 break;
2582 case GE:
2583 /* Optimize abs(x) >= 0.0.  */
2584 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2585 {
2586 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2587 : trueop0;
2588 if (GET_CODE (tem) == ABS)
2589 return const_true_rtx;
2590 }
2591 break;
2593 case UNGE:
2594 /* Optimize ! (abs(x) < 0.0).  */
2595 if (trueop1 == CONST0_RTX (mode))
2596 {
2597 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2598 : trueop0;
2599 if (GET_CODE (tem) == ABS)
2600 return const_true_rtx;
2601 }
2602 break;
2604 default:
2605 break;
2606 }
2608 return 0;
2609 }
2611 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2612 as appropriate.  */
2613 switch (code)
2614 {
2615 case EQ:
2616 case UNEQ:
2617 return equal ? const_true_rtx : const0_rtx;
2618 case NE:
2619 case LTGT:
2620 return ! equal ? const_true_rtx : const0_rtx;
2621 case LT:
2622 case UNLT:
2623 return op0lt ? const_true_rtx : const0_rtx;
2624 case GT:
2625 case UNGT:
2626 return op1lt ? const_true_rtx : const0_rtx;
2627 case LTU:
2628 return op0ltu ? const_true_rtx : const0_rtx;
2629 case GTU:
2630 return op1ltu ? const_true_rtx : const0_rtx;
2631 case LE:
2632 case UNLE:
2633 return equal || op0lt ? const_true_rtx : const0_rtx;
2634 case GE:
2635 case UNGE:
2636 return equal || op1lt ? const_true_rtx : const0_rtx;
2637 case LEU:
2638 return equal || op0ltu ? const_true_rtx : const0_rtx;
2639 case GEU:
2640 return equal || op1ltu ? const_true_rtx : const0_rtx;
2641 case ORDERED:
2642 return const_true_rtx;
2643 case UNORDERED:
2644 return const0_rtx;
2645 default:
2646 abort ();
2647 }
2648 }
2650 /* Simplify CODE, an operation with result mode MODE and three operands,
2651 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2652 a constant.  Return 0 if no simplification is possible.  */
2654 rtx
2655 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2656 enum machine_mode op0_mode, rtx op0, rtx op1,
2657 rtx op2)
2658 {
2659 unsigned int width = GET_MODE_BITSIZE (mode);
2661 /* VOIDmode means "infinite" precision.  */
2662 if (width == 0)
2663 width = HOST_BITS_PER_WIDE_INT;
2665 switch (code)
2666 {
2667 case SIGN_EXTRACT:
2668 case ZERO_EXTRACT:
2669 if (GET_CODE (op0) == CONST_INT
2670 && GET_CODE (op1) == CONST_INT
2671 && GET_CODE (op2) == CONST_INT
2672 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2673 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2674 {
2675 /* Extracting a bit-field from a constant.  */
2676 HOST_WIDE_INT val = INTVAL (op0);
2678 if (BITS_BIG_ENDIAN)
2679 val >>= (GET_MODE_BITSIZE (op0_mode)
2680 - INTVAL (op2) - INTVAL (op1));
2681 else
2682 val >>= INTVAL (op2);
2684 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2685 {
2686 /* First zero-extend. */
2687 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2688 /* If desired, propagate sign bit. */
2689 if (code == SIGN_EXTRACT
2690 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2691 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2692 }
2694 /* Clear the bits that don't belong in our mode,
2695 unless they and our sign bit are all one.
2696 So we get either a reasonable negative value or a reasonable
2697 unsigned value for this mode. */
2698 if (width < HOST_BITS_PER_WIDE_INT
2699 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2700 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2701 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2703 return GEN_INT (val);
2704 }
2705 break;
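/* Worked example (editor's addition, illustrative): on a host without
   BITS_BIG_ENDIAN, (sign_extract:SI (const_int 0x2c) (const_int 4)
   (const_int 2)) shifts 0b101100 right by 2, masks to the 4-bit field
   0b1011, then propagates the set sign bit, folding to (const_int -5).  */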
2707 case IF_THEN_ELSE:
2708 if (GET_CODE (op0) == CONST_INT)
2709 return op0 != const0_rtx ? op1 : op2;
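/* Illustrative example (editor's addition): (if_then_else (const_int 1)
   A B) folds to A here; any nonzero condition constant selects op1.  */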
2711 /* Convert (a != b ? a : b) and (a == b ? b : a) to "a".  */
2712 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2713 && !HONOR_NANS (mode)
2714 && rtx_equal_p (XEXP (op0, 0), op1)
2715 && rtx_equal_p (XEXP (op0, 1), op2))
2716 return op1;
2717 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2718 && !HONOR_NANS (mode)
2719 && rtx_equal_p (XEXP (op0, 1), op1)
2720 && rtx_equal_p (XEXP (op0, 0), op2))
2721 return op2;
2722 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2723 {
2724 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2725 ? GET_MODE (XEXP (op0, 1))
2726 : GET_MODE (XEXP (op0, 0)));
2727 rtx temp;
2728 if (cmp_mode == VOIDmode)
2729 cmp_mode = op0_mode;
2730 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2731 XEXP (op0, 0), XEXP (op0, 1));
2733 /* See if any simplifications were possible. */
2734 if (temp == const0_rtx)
2735 return op2;
2736 else if (temp == const_true_rtx)
2737 return op1;
2741 /* Look for happy constants in op1 and op2. */
2742 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2743 {
2744 HOST_WIDE_INT t = INTVAL (op1);
2745 HOST_WIDE_INT f = INTVAL (op2);
2747 if (t == STORE_FLAG_VALUE && f == 0)
2748 code = GET_CODE (op0);
2749 else if (t == 0 && f == STORE_FLAG_VALUE)
2750 {
2751 enum rtx_code tmp;
2752 tmp = reversed_comparison_code (op0, NULL_RTX);
2753 if (tmp == UNKNOWN)
2754 break;
2755 code = tmp;
2756 }
2757 else
2758 break;
2760 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
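/* Illustrative example (editor's addition): with STORE_FLAG_VALUE == 1,
   (if_then_else (lt X Y) (const_int 1) (const_int 0)) collapses to
   (lt X Y) itself, while the 0/1-swapped form becomes the reversed
   comparison (ge X Y), when a reversal is known to be safe.  */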
2761 }
2762 break;
2764 case VEC_MERGE:
2765 if (GET_MODE (op0) != mode
2766 || GET_MODE (op1) != mode
2767 || !VECTOR_MODE_P (mode))
2768 abort ();
2769 op2 = avoid_constant_pool_reference (op2);
2770 if (GET_CODE (op2) == CONST_INT)
2771 {
2772 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2773 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2774 int mask = (1 << n_elts) - 1;
2776 if (!(INTVAL (op2) & mask))
2777 return op1;
2778 if ((INTVAL (op2) & mask) == mask)
2779 return op0;
2781 op0 = avoid_constant_pool_reference (op0);
2782 op1 = avoid_constant_pool_reference (op1);
2783 if (GET_CODE (op0) == CONST_VECTOR
2784 && GET_CODE (op1) == CONST_VECTOR)
2785 {
2786 rtvec v = rtvec_alloc (n_elts);
2787 unsigned int i;
2789 for (i = 0; i < n_elts; i++)
2790 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2791 ? CONST_VECTOR_ELT (op0, i)
2792 : CONST_VECTOR_ELT (op1, i));
2793 return gen_rtx_CONST_VECTOR (mode, v);
2794 }
2795 break;
2797 default:
2798 abort ();
2799 }
2801 return 0;
2802 }
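/* Illustrative example (editor's addition): merging two V4SI constant
   vectors with op2 = (const_int 5) = 0b0101 selects elements 0 and 2
   from op0 and elements 1 and 3 from op1 in the loop above.  */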
2805 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2806 Return 0 if no simplification is possible.  */
2807 rtx
2808 simplify_subreg (enum machine_mode outermode, rtx op,
2809 enum machine_mode innermode, unsigned int byte)
2810 {
2811 /* Little bit of sanity checking.  */
2812 if (innermode == VOIDmode || outermode == VOIDmode
2813 || innermode == BLKmode || outermode == BLKmode)
2814 abort ();
2816 if (GET_MODE (op) != innermode
2817 && GET_MODE (op) != VOIDmode)
2818 abort ();
2820 if (byte % GET_MODE_SIZE (outermode)
2821 || byte >= GET_MODE_SIZE (innermode))
2822 abort ();
2824 if (outermode == innermode && !byte)
2825 return op;
2827 /* Simplify subregs of vector constants. */
2828 if (GET_CODE (op) == CONST_VECTOR)
2829 {
2830 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2831 const unsigned int offset = byte / elt_size;
2832 rtx elt;
2834 if (GET_MODE_INNER (innermode) == outermode)
2835 {
2836 elt = CONST_VECTOR_ELT (op, offset);
2838 /* ??? We probably don't need this copy_rtx because constants
2839 can be shared.  */
2841 return copy_rtx (elt);
2842 }
2843 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2844 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2845 {
2846 return (gen_rtx_CONST_VECTOR
2847 (outermode,
2848 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2849 &CONST_VECTOR_ELT (op, offset))));
2850 }
2851 else if (GET_MODE_CLASS (outermode) == MODE_INT
2852 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2853 {
2854 /* This happens when the target register size is smaller than
2855 the vector mode, and we synthesize operations with vectors
2856 of elements that are smaller than the register size. */
2857 HOST_WIDE_INT sum = 0, high = 0;
2858 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2859 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2860 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2861 int shift = BITS_PER_UNIT * elt_size;
2862 unsigned HOST_WIDE_INT unit_mask;
2864 unit_mask = (unsigned HOST_WIDE_INT) -1
2865 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
2867 for (; n_elts--; i += step)
2868 {
2869 elt = CONST_VECTOR_ELT (op, i);
2870 if (GET_CODE (elt) == CONST_DOUBLE
2871 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2872 {
2873 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2874 elt);
2875 if (! elt)
2876 return NULL_RTX;
2877 }
2878 if (GET_CODE (elt) != CONST_INT)
2879 return NULL_RTX;
2880 /* Avoid overflow.  */
2881 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2882 return NULL_RTX;
2883 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2884 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
2885 }
2886 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2887 return GEN_INT (trunc_int_for_mode (sum, outermode));
2888 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2889 return immed_double_const (sum, high, outermode);
2890 else
2891 return NULL_RTX;
2892 }
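/* Worked example (editor's addition, illustrative): an SImode subreg at
   byte 0 of a V4QI constant vector {1, 2, 3, 4} packs the four QImode
   elements with shift = 8, giving 0x01020304 on a big-endian target and
   0x04030201 on a little-endian one.  */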
2893 else if (GET_MODE_CLASS (outermode) == MODE_INT
2894 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2895 {
2896 enum machine_mode new_mode
2897 = int_mode_for_mode (GET_MODE_INNER (innermode));
2898 int subbyte = byte % elt_size;
2900 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2901 if (! op)
2902 return NULL_RTX;
2903 return simplify_subreg (outermode, op, new_mode, subbyte);
2904 }
2905 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2906 /* This shouldn't happen, but let's not do anything stupid.  */
2907 return NULL_RTX;
2908 }
2910 /* Attempt to simplify constant to non-SUBREG expression. */
2911 if (CONSTANT_P (op))
2912 {
2913 int offset, part;
2914 unsigned HOST_WIDE_INT val = 0;
2916 if (VECTOR_MODE_P (outermode))
2917 {
2918 /* Construct a CONST_VECTOR from individual subregs. */
2919 enum machine_mode submode = GET_MODE_INNER (outermode);
2920 int subsize = GET_MODE_UNIT_SIZE (outermode);
2921 int i, elts = GET_MODE_NUNITS (outermode);
2922 rtvec v = rtvec_alloc (elts);
2923 rtx elt;
2925 for (i = 0; i < elts; i++, byte += subsize)
2926 {
2927 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2928 /* ??? It would be nice if we could actually make such subregs
2929 on targets that allow such relocations. */
2930 if (byte >= GET_MODE_SIZE (innermode))
2931 elt = CONST0_RTX (submode);
2932 else
2933 elt = simplify_subreg (submode, op, innermode, byte);
2934 if (! elt)
2935 return NULL_RTX;
2936 RTVEC_ELT (v, i) = elt;
2937 }
2938 return gen_rtx_CONST_VECTOR (outermode, v);
2939 }
2941 /* ??? This code is partly redundant with code below, but can handle
2942 the subregs of floats and similar corner cases.
2943 Later we should move all simplification code here and rewrite
2944 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2945 using SIMPLIFY_SUBREG. */
2946 if (subreg_lowpart_offset (outermode, innermode) == byte
2947 && GET_CODE (op) != CONST_VECTOR)
2948 {
2949 rtx new = gen_lowpart_if_possible (outermode, op);
2950 if (new)
2951 return new;
2952 }
2954 /* A similar comment to the one above applies here.  */
2955 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2956 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2957 && GET_MODE_CLASS (outermode) == MODE_INT)
2958 {
2959 rtx new = constant_subword (op,
2960 (byte / UNITS_PER_WORD),
2961 innermode);
2962 if (new)
2963 return new;
2964 }
2966 if (GET_MODE_CLASS (outermode) != MODE_INT
2967 && GET_MODE_CLASS (outermode) != MODE_CC)
2968 {
2969 enum machine_mode new_mode = int_mode_for_mode (outermode);
2971 if (new_mode != innermode || byte != 0)
2972 {
2973 op = simplify_subreg (new_mode, op, innermode, byte);
2974 if (! op)
2975 return NULL_RTX;
2976 return simplify_subreg (outermode, op, new_mode, 0);
2977 }
2978 }
2980 offset = byte * BITS_PER_UNIT;
2981 switch (GET_CODE (op))
2982 {
2983 case CONST_DOUBLE:
2984 if (GET_MODE (op) != VOIDmode)
2985 break;
2987 /* We can't handle this case yet. */
2988 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2989 return NULL_RTX;
2991 part = offset >= HOST_BITS_PER_WIDE_INT;
2992 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2993 && BYTES_BIG_ENDIAN)
2994 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2995 && WORDS_BIG_ENDIAN))
2996 part = !part;
2997 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2998 offset %= HOST_BITS_PER_WIDE_INT;
3000 /* We've already picked the word we want from a double, so
3001 pretend this is actually an integer. */
3002 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
3004 /* FALLTHROUGH */
3005 case CONST_INT:
3006 if (GET_CODE (op) == CONST_INT)
3007 val = INTVAL (op);
3009 /* We don't handle synthesizing of non-integral constants yet. */
3010 if (GET_MODE_CLASS (outermode) != MODE_INT)
3011 return NULL_RTX;
3013 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
3014 {
3015 if (WORDS_BIG_ENDIAN)
3016 offset = (GET_MODE_BITSIZE (innermode)
3017 - GET_MODE_BITSIZE (outermode) - offset);
3018 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
3019 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
3020 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
3021 - 2 * (offset % BITS_PER_WORD));
3022 }
3024 if (offset >= HOST_BITS_PER_WIDE_INT)
3025 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
3026 else
3027 {
3028 val >>= offset;
3029 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
3030 val = trunc_int_for_mode (val, outermode);
3031 return GEN_INT (val);
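/* Worked example (editor's addition, illustrative): on a little-endian
   target, (subreg:QI (const_int 0x1234) 0) of an HImode value computes
   offset = 0, so val is truncated to QImode and the result folds to
   (const_int 0x34).  */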
3038 /* Changing mode twice with SUBREG => just change it once,
3039 or not at all if changing back to the starting mode of op.  */
3040 if (GET_CODE (op) == SUBREG)
3041 {
3042 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3043 int final_offset = byte + SUBREG_BYTE (op);
3044 rtx new;
3046 if (outermode == innermostmode
3047 && byte == 0 && SUBREG_BYTE (op) == 0)
3048 return SUBREG_REG (op);
3050 /* The SUBREG_BYTE represents the offset, as if the value were stored
3051 in memory.  An irritating exception is the paradoxical subreg, where
3052 we define SUBREG_BYTE to be 0.  On big-endian machines, this value
3053 should be negative.  For a moment, undo this exception.  */
3054 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3055 {
3056 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3057 if (WORDS_BIG_ENDIAN)
3058 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3059 if (BYTES_BIG_ENDIAN)
3060 final_offset += difference % UNITS_PER_WORD;
3061 }
3062 if (SUBREG_BYTE (op) == 0
3063 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3064 {
3065 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3066 if (WORDS_BIG_ENDIAN)
3067 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3068 if (BYTES_BIG_ENDIAN)
3069 final_offset += difference % UNITS_PER_WORD;
3070 }
3072 /* See whether resulting subreg will be paradoxical. */
3073 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3075 /* In nonparadoxical subregs we can't handle negative offsets. */
3076 if (final_offset < 0)
3077 return NULL_RTX;
3078 /* Bail out in case resulting subreg would be incorrect. */
3079 if (final_offset % GET_MODE_SIZE (outermode)
3080 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3081 return NULL_RTX;
3082 }
3083 else
3084 {
3086 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3088 /* For a paradoxical subreg, see if we are still looking at the lower
3089 part.  If so, our SUBREG_BYTE will be 0.  */
3090 if (WORDS_BIG_ENDIAN)
3091 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3092 if (BYTES_BIG_ENDIAN)
3093 offset += difference % UNITS_PER_WORD;
3094 if (offset == final_offset)
3095 final_offset = 0;
3096 else
3097 return NULL_RTX;
3098 }
3100 /* Recurse for further possible simplifications. */
3101 new = simplify_subreg (outermode, SUBREG_REG (op),
3102 GET_MODE (SUBREG_REG (op)),
3103 final_offset);
3104 if (new)
3105 return new;
3106 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3107 }
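/* Illustrative example (editor's addition): on a little-endian target,
   (subreg:QI (subreg:HI (reg:SI R) 2) 1) reaches here with the byte
   offsets simply adding, folding to (subreg:QI (reg:SI R) 3).  */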
3109 /* SUBREG of a hard register => just change the register number
3110 and/or mode. If the hard register is not valid in that mode,
3111 suppress this simplification. If the hard register is the stack,
3112 frame, or argument pointer, leave this as a SUBREG. */
3114 if (GET_CODE (op) == REG
3115 && (! REG_FUNCTION_VALUE_P (op)
3116 || ! rtx_equal_function_value_matters)
3117 && REGNO (op) < FIRST_PSEUDO_REGISTER
3118 #ifdef CANNOT_CHANGE_MODE_CLASS
3119 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3120 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3121 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3122 #endif
3123 && ((reload_completed && !frame_pointer_needed)
3124 || (REGNO (op) != FRAME_POINTER_REGNUM
3125 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3126 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3127 #endif
3128 ))
3129 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3130 && REGNO (op) != ARG_POINTER_REGNUM
3131 #endif
3132 && REGNO (op) != STACK_POINTER_REGNUM
3133 && subreg_offset_representable_p (REGNO (op), innermode,
3134 byte, outermode))
3135 {
3136 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3137 int final_regno = subreg_hard_regno (tem, 0);
3139 /* ??? We do allow it if the current REG is not valid for
3140 its mode. This is a kludge to work around how float/complex
3141 arguments are passed on 32-bit SPARC and should be fixed. */
3142 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3143 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3144 {
3145 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3147 /* Propagate the original regno.  We don't have any way to specify
3148 the offset inside the original regno, so do so only for the lowpart.
3149 The information is used only by alias analysis, which cannot
3150 grok partial registers anyway.  */
3152 if (subreg_lowpart_offset (outermode, innermode) == byte)
3153 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3154 return x;
3155 }
3156 }
3158 /* If we have a SUBREG of a register that we are replacing and we are
3159 replacing it with a MEM, make a new MEM and try replacing the
3160 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3161 or if we would be widening it. */
3163 if (GET_CODE (op) == MEM
3164 && ! mode_dependent_address_p (XEXP (op, 0))
3165 /* Allow splitting of volatile memory references in case we don't
3166 have an instruction to move the whole thing.  */
3167 && (! MEM_VOLATILE_P (op)
3168 || ! have_insn_for (SET, innermode))
3169 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3170 return adjust_address_nv (op, outermode, byte);
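/* Illustrative example (editor's addition): narrowing
   (subreg:QI (mem:SI A) 3) is rewritten by adjust_address_nv as a QImode
   memory reference at address A plus 3, provided the address is not
   mode-dependent.  */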
3172 /* Handle complex values represented as CONCAT
3173 of real and imaginary part. */
3174 if (GET_CODE (op) == CONCAT)
3175 {
3176 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
3177 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3178 unsigned int final_offset;
3179 rtx res;
3181 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3182 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3183 if (res)
3184 return res;
3185 /* We can at least simplify it by referring directly to the relevant part. */
3186 return gen_rtx_SUBREG (outermode, part, final_offset);
3187 }
3189 return NULL_RTX;
3190 }
3191 /* Make a SUBREG operation or equivalent if it folds. */
3193 rtx
3194 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3195 enum machine_mode innermode, unsigned int byte)
3196 {
3197 rtx new;
3198 /* Little bit of sanity checking. */
3199 if (innermode == VOIDmode || outermode == VOIDmode
3200 || innermode == BLKmode || outermode == BLKmode)
3201 abort ();
3203 if (GET_MODE (op) != innermode
3204 && GET_MODE (op) != VOIDmode)
3205 abort ();
3207 if (byte % GET_MODE_SIZE (outermode)
3208 || byte >= GET_MODE_SIZE (innermode))
3209 abort ();
3211 if (GET_CODE (op) == QUEUED)
3212 return NULL_RTX;
3214 new = simplify_subreg (outermode, op, innermode, byte);
3215 if (new)
3216 return new;
3218 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3219 return NULL_RTX;
3221 return gen_rtx_SUBREG (outermode, op, byte);
3222 }
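/* Illustrative sketch (editor's addition, not part of GCC): using
   simplify_gen_subreg to take the low SImode word of a DImode value.
   "example_lowpart" is hypothetical; subreg_lowpart_offset yields byte 0
   or byte 4 depending on the target's endianness.  */
#if 0
static rtx
example_lowpart (rtx op)
{
  return simplify_gen_subreg (SImode, op, DImode,
                              subreg_lowpart_offset (SImode, DImode));
}
#endif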
3223 /* Simplify X, an rtx expression.
3225 Return the simplified expression or NULL if no simplifications
3226 were possible.
3228 This is the preferred entry point into the simplification routines;
3229 however, we still allow passes to call the more specific routines.
3231 Right now GCC has three (yes, three) major bodies of RTL simplification
3232 code that need to be unified.
3234 1. fold_rtx in cse.c. This code uses various CSE specific
3235 information to aid in RTL simplification.
3237 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3238 it uses combine specific information to aid in RTL
3239 simplification.
3241 3. The routines in this file.
3244 Long term we want to only have one body of simplification code; to
3245 get to that state I recommend the following steps:
3247 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3248 that do not depend on pass-specific state into these routines.
3250 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3251 use this routine whenever possible.
3253 3. Allow for pass dependent state to be provided to these
3254 routines and add simplifications based on the pass dependent
3255 state. Remove code from cse.c & combine.c that becomes
3258 It will take time, but ultimately the compiler will be easier to
3259 maintain and improve.  It's totally silly that when we add a
3260 simplification it needs to be added in four places (three for RTL
3261 simplification and one for tree simplification).  */
3263 rtx
3264 simplify_rtx (rtx x)
3265 {
3266 enum rtx_code code = GET_CODE (x);
3267 enum machine_mode mode = GET_MODE (x);
3268 rtx temp;
3270 switch (GET_RTX_CLASS (code))
3271 {
3272 case '1':
3273 return simplify_unary_operation (code, mode,
3274 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3275 case 'c':
3276 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3277 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3279 /* Fall through....  */
3281 case '2':
3282 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3284 case '3':
3285 case 'b':
3286 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3287 XEXP (x, 0), XEXP (x, 1),
3288 XEXP (x, 2));
3290 case '<':
3291 temp = simplify_relational_operation (code,
3292 ((GET_MODE (XEXP (x, 0))
3293 != VOIDmode)
3294 ? GET_MODE (XEXP (x, 0))
3295 : GET_MODE (XEXP (x, 1))),
3296 XEXP (x, 0), XEXP (x, 1));
3297 #ifdef FLOAT_STORE_FLAG_VALUE
3298 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3299 {
3300 if (temp == const0_rtx)
3301 temp = CONST0_RTX (mode);
3302 else
3303 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3304 mode);
3305 }
3306 #endif
3307 return temp;
3309 case 'x':
3310 if (code == SUBREG)
3311 return simplify_gen_subreg (mode, SUBREG_REG (x),
3312 GET_MODE (SUBREG_REG (x)),
3313 SUBREG_BYTE (x));
3314 if (code == CONSTANT_P_RTX)
3315 {
3316 if (CONSTANT_P (XEXP (x, 0)))
3317 return const1_rtx;
3318 }
3319 break;
3321 case 'o':
3322 if (code == LO_SUM)
3323 {
3324 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3325 if (GET_CODE (XEXP (x, 0)) == HIGH
3326 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3327 return XEXP (x, 1);
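/* Illustrative example (editor's addition): (lo_sum (high (symbol_ref
   "foo")) (symbol_ref "foo")) reassembles the complete address, so the
   whole expression folds to (symbol_ref "foo"), i.e. XEXP (x, 1).  */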