/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in simplify-rtx.c.  Until then, do not
   change these macros without also changing the copy in simplify-rtx.c.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
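
/* As an illustration of the macros above, an address such as
   (plus (reg/f frame_pointer) (const_int 8)) satisfies both
   FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P, while a stack-pointer
   form such as (plus (reg sp) (const_int 8)) satisfies only
   NONZERO_BASE_PLUS_P: the stack pointer is known nonzero but is not
   a fixed base.  */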
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */

#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
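
/* For example, with a 32-bit HOST_WIDE_INT, HWI_SIGN_EXTEND applied
   to a low word of 0x80000000 (sign bit set) yields a high word of -1
   (all ones), while a low word of 0x7fffffff yields 0, just as a
   widening of the signed value would.  */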
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
					enum machine_mode, rtx, rtx));
static void check_fold_consts PARAMS ((PTR));
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
	  || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
	      && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
	  || (GET_CODE (op0) == SUBREG
	      && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
	      && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction of CONST_INT specially.  Otherwise,
     just form the operation.  */

  if (code == PLUS && GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, INTVAL (op1));
  else if (code == MINUS && GET_CODE (op1) == CONST_INT
	   && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, - INTVAL (op1));
  else
    return gen_rtx_fmt_ee (code, mode, op0, op1);
}
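
/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   folds to X itself, while a commutative call with a constant first,
   such as (PLUS, SImode, (const_int 3), x), is first canonicalized so
   the constant ends up second, yielding (plus x (const_int 3)).  */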
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.  */

rtx
simplify_gen_relational (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, mode, op0, op1)) != 0)
    return tem;

  /* Put complex operands first and constants second.  */
  if ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
      || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
	  && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
      || (GET_CODE (op0) == SUBREG
	  && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
	  && GET_RTX_CLASS (GET_CODE (op1)) != 'o'))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));

    case '3':
    case 'b':
      return
	simplify_gen_ternary (code, mode, GET_MODE (XEXP (x, 0)),
			      simplify_replace_rtx (XEXP (x, 0), old, new),
			      simplify_replace_rtx (XEXP (x, 1), old, new),
			      simplify_replace_rtx (XEXP (x, 2), old, new));

    case 'x':
      /* The only case we try to handle is a lowpart SUBREG of a
	 single-word constant.  */
      if (code == SUBREG && subreg_lowpart_p (x) && old == SUBREG_REG (x)
	  && GET_CODE (new) == CONST_INT
	  && GET_MODE_SIZE (GET_MODE (old)) <= UNITS_PER_WORD)
	return GEN_INT (INTVAL (new) & GET_MODE_MASK (mode));
      break;

    default:
      break;
    }

  return x;
}
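
/* For example, replacing OLD by (const_int 258) underneath a lowpart
   (subreg:QI ...) of it yields (const_int 2), since
   258 & GET_MODE_MASK (QImode) == 2.  */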
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_INT (d, lv, hv, mode);
#else
      if (hv < 0)
	{
	  d = (double) (~ hv);
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) (~ lv);
	  d = (- d - 1.0);
	}
      else
	{
	  d = (double) hv;
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) lv;
	}
#endif  /* REAL_ARITHMETIC */

      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
#else
      d = (double) (unsigned HOST_WIDE_INT) hv;
      d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
	    * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
      d += (double) (unsigned HOST_WIDE_INT) lv;
#endif  /* REAL_ARITHMETIC */

      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      register HOST_WIDE_INT arg0 = INTVAL (op);
      register HOST_WIDE_INT val;

      switch (code)
	{
	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	default:
	  return 0;
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
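
  /* For example, (abs:SI (const_int -5)) folds to (const_int 5) in the
     CONST_INT case above, and (ffs:SI (const_int 12)) folds to
     (const_int 3): the lowest set bit of 12 (binary 1100) is bit 2,
     and FFS counts bits from 1.  */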
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
	      && (lv & ((HOST_WIDE_INT) 1
			<< (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
	    lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	  hv = HWI_SIGN_EXTEND (lv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      rtx x;

      if (setjmp (handler))
	/* There used to be a warning here, but that is inadvisable.
	   People may want to cause traps, and the natural way
	   to do it should not get a warning.  */
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;

	case ABS:
	  if (REAL_VALUE_NEGATIVE (d))
	    d = REAL_VALUE_NEGATE (d);
	  break;

	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;

	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;

	case FIX:
	  d = REAL_VALUE_RNDZINT (d);
	  break;

	case UNSIGNED_FIX:
	  d = REAL_VALUE_UNSIGNED_RNDZINT (d);
	  break;

	default:
	  return 0;
	}

      x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
      set_float_handler (NULL_PTR);
      return x;
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      HOST_WIDE_INT val;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case FIX:
	  val = REAL_VALUE_FIX (d);
	  break;

	case UNSIGNED_FIX:
	  val = REAL_VALUE_UNSIGNED_FIX (d);
	  break;

	default:
	  return 0;
	}

      set_float_handler (NULL_PTR);

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
#endif
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the Vax).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#ifdef POINTERS_EXTEND_UNSIGNED
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	case ZERO_EXTEND:
#ifdef POINTERS_EXTEND_UNSIGNED
	  if (POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;
      jmp_buf handler;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

#ifdef REAL_ARITHMETIC
#ifndef REAL_INFINITY
      if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
	return 0;
#endif
      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
#else
      switch (code)
	{
	case PLUS:
	  value = f0 + f1;
	  break;
	case MINUS:
	  value = f0 - f1;
	  break;
	case MULT:
	  value = f0 * f1;
	  break;
	case DIV:
#ifndef REAL_INFINITY
	  if (f1 == 0)
	    return 0;
#endif
	  value = f0 / f1;
	  break;
	case SMIN:
	  value = MIN (f0, f1);
	  break;
	case SMAX:
	  value = MAX (f0, f1);
	  break;
	default:
	  abort ();
	}
#endif

      value = real_value_truncate (mode, value);
      set_float_handler (NULL_PTR);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (op0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* .. fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:   case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* In IEEE floating point, x+0 is not the same as x.  Similarly
	     for the other optimizations below.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
	    break;

	  if (op1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));
	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
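
	  /* For example, (plus (mult x (const_int 3)) x) is folded here
	     to (mult x (const_int 4)): coeff0 is 3 from the multiply,
	     coeff1 is 1 from the bare x, and since a multiply was
	     already present the new MULT is allowed.  */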
	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;
	  break;

	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && op1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;

	case MINUS:
	  /* None of these optimizations can be done for IEEE
	     floating point.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
	    break;

	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (op0, op1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  */
	  if (op0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (op0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect.  */
	  if (op1 == CONST0_RTX (mode))
	    return op0;
	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
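
	  /* For example, (minus (mult x (const_int 3)) x) is folded
	     here to x * 2 (coeff0 - coeff1 is 3 - 1), which the MULT
	     case below may further turn into a shift.  */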
	  /* (a - (-b)) -> (a + b).  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return plus_constant (op0, - INTVAL (op1));

	  /* (x - (x & y)) -> (x & ~y).  */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 1)));
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 0)));
	    }
	  break;
	case MULT:
	  if (op1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* In IEEE floating point, x*0 is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && op1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for nans.
	     However, ANSI says we can drop signals,
	     so we can do this anyway.  */
	  if (op1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (val = exact_log2 (INTVAL (op1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
	  if (GET_CODE (op1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE d;
	      jmp_buf handler;
	      int op1is2, op1ism1;

	      if (setjmp (handler))
		return 0;

	      set_float_handler (handler);
	      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
	      op1is2 = REAL_VALUES_EQUAL (d, dconst2);
	      op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
	      set_float_handler (NULL_PTR);

	      /* x*2 is x+x and x*(-1) is -x.  */
	      if (op1is2 && GET_MODE (op0) == mode)
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      else if (op1ism1 && GET_MODE (op0) == mode)
		return gen_rtx_NEG (mode, op0);
	    }
	  break;
	case IOR:
	  if (op1 == const0_rtx)
	    return op0;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return op1;
	  if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;

	case XOR:
	  if (op1 == const0_rtx)
	    return op0;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return gen_rtx_NOT (mode, op0);
	  if (op0 == op1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case AND:
	  if (op1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return op0;
	  if (op0 == op1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;
	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (op1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (op1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
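
	  /* For example, (udiv x (const_int 8)) becomes
	     (lshiftrt x (const_int 3)), since exact_log2 (8) == 3.  */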
	  /* ... fall through ...  */

	case DIV:
	  if (op1 == CONST1_RTX (mode))
	    return op0;

	  /* In IEEE floating point, 0/x is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && op0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (op1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
		   && op1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
#if defined (REAL_ARITHMETIC)
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
#else
		  return
		    gen_rtx_MULT (mode, op0,
				  CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
#endif
		}
	    }
#endif
	  break;
	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (op1) == CONST_INT
	      && exact_log2 (INTVAL (op1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
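
	  /* For example, (umod x (const_int 8)) becomes
	     (and x (const_int 7)), masking in the low three bits.  */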
	  /* ... fall through ...  */

	case MOD:
	  if ((op0 == const0_rtx || op1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */

	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	  if (op1 == const0_rtx)
	    return op0;
	  if (op0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
	      && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (op1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMIN:
	  if (op1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMAX:
	  if (op1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	default:
	  break;
	}

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (op0);
  arg1 = INTVAL (op1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0)
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0)
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0)
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0)
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      return 0;
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

static rtx
simplify_plus_minus (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx ops[8];
  int negs[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
  int first = 1, negate = 0, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);

  changed = 1;
  while (changed)
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	switch (GET_CODE (ops[i]))
	  {
	  case PLUS:
	  case MINUS:
	    if (n_ops == 7)
	      return 0;

	    ops[n_ops] = XEXP (ops[i], 1);
	    negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
	    ops[i] = XEXP (ops[i], 0);
	    input_ops++;
	    changed = 1;
	    break;

	  case NEG:
	    ops[i] = XEXP (ops[i], 0);
	    negs[i] = ! negs[i];
	    changed = 1;
	    break;

	  case CONST:
	    ops[i] = XEXP (ops[i], 0);
	    input_consts++;
	    changed = 1;
	    break;

	  case NOT:
	    /* ~a -> (-a - 1) */
	    if (n_ops != 7)
	      {
		ops[n_ops] = constm1_rtx;
		negs[n_ops++] = negs[i];
		ops[i] = XEXP (ops[i], 0);
		negs[i] = ! negs[i];
		changed = 1;
	      }
	    break;

	  case CONST_INT:
	    if (negs[i])
	      ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
	    break;

	  default:
	    break;
	  }
    }

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2)
    return 0;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  changed = 1;
  while (changed)
    {
      changed = first;
      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  if (ops[i] != 0 && ops[j] != 0
	      && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
	    {
	      rtx lhs = ops[i], rhs = ops[j];
	      enum rtx_code ncode = PLUS;

	      if (negs[i] && ! negs[j])
		lhs = ops[j], rhs = ops[i], ncode = MINUS;
	      else if (! negs[i] && negs[j])
		ncode = MINUS;

	      tem = simplify_binary_operation (ncode, mode, lhs, rhs);
	      if (tem)
		{
		  ops[i] = tem, ops[j] = 0;
		  negs[i] = negs[i] && negs[j];
		  if (GET_CODE (tem) == NEG)
		    ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];

		  if (GET_CODE (ops[i]) == CONST_INT && negs[i])
		    ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;

		  changed = 1, n_ops--;
		}
	    }

      first = 0;
    }

  /* Pack all the operands to the lower-numbered entries and give up if
     we didn't reduce the number of operands we had.  Make sure we
     count a CONST as two operands.  If we have the same number of
     operands, but have made more CONSTs than we had, this is also
     an improvement, so accept it.  */

  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j] != 0)
      {
	ops[i] = ops[j], negs[i++] = negs[j];
	if (GET_CODE (ops[j]) == CONST)
	  n_consts++;
      }

  if (i + n_consts > input_ops
      || (i + n_consts == input_ops && n_consts <= input_consts))
    return 0;

  n_ops = i;

  /* If we have a CONST_INT, put it last.  */
  for (i = 0; i < n_ops - 1; i++)
    if (GET_CODE (ops[i]) == CONST_INT)
      {
	tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i], ops[i] = tem;
	j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
      }

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */
  for (i = 0; i < n_ops && negs[i]; i++)
    ;

  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	negs[i] = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0], ops[0] = ops[i], ops[i] = tem;
      j = negs[0], negs[0] = negs[i], negs[i] = j;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0];
  for (i = 1; i < n_ops; i++)
    result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
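
/* As an example of the brute-force method above, an operand tree such
   as (plus (minus a (plus b c)) c) is expanded into the operand list
   a, -b, -c, c; the -c and c then cancel against each other, and the
   remaining operands are rebuilt as (minus a b).  */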
/* Arguments passed to and results returned from the floating point
   handler below.  */

struct cfc_args
{
  rtx op0, op1;			/* Input */
  int equal, op0lt, op1lt;	/* Output */
  int unordered;
};

static void
check_fold_consts (data)
     PTR data;
{
  struct cfc_args *args = (struct cfc_args *) data;
  REAL_VALUE_TYPE d0, d1;

  /* We may possibly raise an exception while reading the value.  */
  args->unordered = 1;
  REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
  REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);

  /* Comparisons of Inf versus Inf are ordered.  */
  if (REAL_VALUE_ISNAN (d0)
      || REAL_VALUE_ISNAN (d1))
    return;
  args->equal = REAL_VALUES_EQUAL (d0, d1);
  args->op0lt = REAL_VALUES_LESS (d0, d1);
  args->op1lt = REAL_VALUES_LESS (d1, d0);
  args->unordered = 0;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
      || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;
  /* For non-IEEE floating-point, if the two operands are equal, we know the
     result.  */
  if (rtx_equal_p (op0, op1)
      && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	  || ! FLOAT_MODE_P (GET_MODE (op0))
	  || flag_unsafe_math_optimizations))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Setup input for check_fold_consts() */
      args.op0 = op0;
      args.op1 = op1;

      if (!do_float_handler (check_fold_consts, (PTR) &args))
	{
	  /* We got an exception from check_fold_consts().  */
	  return 0;
	}

      /* If the comparison is unordered (either operand is a NaN), only
	 the codes that hold for unordered operands are true.  */
      if (args.unordered)
	switch (code)
	  {
	  case UNEQ: case UNLT: case UNGT: case UNLE: case UNGE:
	  case NE:   case UNORDERED:
	    return const_true_rtx;
	  case EQ: case LT: case GT: case LE: case GE:
	  case LTGT: case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      /* Receive output from check_fold_consts() */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
	   && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (op0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (op0);
	  h0u = h0s = CONST_DOUBLE_HIGH (op0);
	}
      else
	{
	  l0u = l0s = INTVAL (op0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (op1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (op1);
	  h1u = h1s = CONST_DOUBLE_HIGH (op1);
	}
      else
	{
	  l1u = l1s = INTVAL (op1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (op1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (op1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	default:
	  break;
	}

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;
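
      /* For example, (zero_extract:SI (const_int 244) (const_int 4)
	 (const_int 2)) extracts the four bits starting at bit 2 of
	 244 (binary 11110100), giving (const_int 13); a sign_extract
	 of the same field gives (const_int -3), since the top bit of
	 the field is set (assuming !BITS_BIG_ENDIAN numbering).  */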
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	       && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	       && rtx_equal_p (XEXP (op0, 1), op1)
	       && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
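
/* For example, with STORE_FLAG_VALUE == 1, simplify_ternary_operation
   collapses (if_then_else (lt x y) (const_int 1) (const_int 0)) to
   (lt x y) itself, and (if_then_else (lt x y) (const_int 0)
   (const_int 1)) to the reversed comparison (ge x y), when the
   reversal is known to be valid.  */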
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification that it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */
rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case '2':
    case 'c':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
    default:
      return NULL;
    }
}