1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
33 #include "insn-config.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
44 virtual regs here because the simplify_*_operation routines are called
45 by integrate.c, which is called before virtual register instantiation.
47 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
48 a header file so that their definitions can be shared with the
49 folding routines in cse.c. Until then, do not
50 change these macros without also changing the copy in cse.c. */
52 #define FIXED_BASE_PLUS_P(X) \
53 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
54 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
55 || (X) == virtual_stack_vars_rtx \
56 || (X) == virtual_incoming_args_rtx \
57 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
58 && (XEXP (X, 0) == frame_pointer_rtx \
59 || XEXP (X, 0) == hard_frame_pointer_rtx \
60 || (XEXP (X, 0) == arg_pointer_rtx \
61 && fixed_regs[ARG_POINTER_REGNUM]) \
62 || XEXP (X, 0) == virtual_stack_vars_rtx \
63 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
64 || GET_CODE (X) == ADDRESSOF)
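/* Editorial example (not from the original sources): this predicate accepts
   both a bare base register and a base-plus-constant form, e.g.
   frame_pointer_rtx itself or (plus frame_pointer_rtx (const_int 8)).  */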
66 /* Similar, but also allows reference to the stack pointer.
68 This used to include FIXED_BASE_PLUS_P; however, we can't assume that
69 arg_pointer_rtx by itself is nonzero, because on at least one machine,
70 the i960, the arg pointer is zero when it is unused. */
72 #define NONZERO_BASE_PLUS_P(X) \
73 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
74 || (X) == virtual_stack_vars_rtx \
75 || (X) == virtual_incoming_args_rtx \
76 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
77 && (XEXP (X, 0) == frame_pointer_rtx \
78 || XEXP (X, 0) == hard_frame_pointer_rtx \
79 || (XEXP (X, 0) == arg_pointer_rtx \
80 && fixed_regs[ARG_POINTER_REGNUM]) \
81 || XEXP (X, 0) == virtual_stack_vars_rtx \
82 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
83 || (X) == stack_pointer_rtx \
84 || (X) == virtual_stack_dynamic_rtx \
85 || (X) == virtual_outgoing_args_rtx \
86 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
87 && (XEXP (X, 0) == stack_pointer_rtx \
88 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
89 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
90 || GET_CODE (X) == ADDRESSOF)
92 /* Much code operates on (low, high) pairs; the low value is an
93 unsigned wide int, the high value a signed wide int. We
94 occasionally need to sign extend from low to high as if low were a signed wide int. */
96 #define HWI_SIGN_EXTEND(low) \
97 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
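/* Editorial sketch (not part of the original file): how a caller typically
   splits a CONST_INT into such a (low, high) pair; `x' is assumed to be a
   CONST_INT rtx.  */
#if 0
  unsigned HOST_WIDE_INT low = INTVAL (x);      /* low word, unsigned */
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);   /* all zeros or all ones */
#endif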
99 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
100 enum machine_mode, rtx, rtx));
101 static void check_fold_consts PARAMS ((PTR));
103 /* Make a binary operation by properly ordering the operands and
104 seeing if the expression folds. */
107 simplify_gen_binary (code, mode, op0, op1)
109 enum machine_mode mode;
114 /* Put complex operands first and constants second if commutative. */
115 if (GET_RTX_CLASS (code) == 'c'
116 && swap_commutative_operands_p (op0, op1))
117 tem = op0, op0 = op1, op1 = tem;
119 /* If this simplifies, do it. */
120 tem = simplify_binary_operation (code, mode, op0, op1);
125 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
126 just form the operation. */
128 if (code == PLUS && GET_CODE (op1) == CONST_INT
129 && GET_MODE (op0) != VOIDmode)
130 return plus_constant (op0, INTVAL (op1));
131 else if (code == MINUS && GET_CODE (op1) == CONST_INT
132 && GET_MODE (op0) != VOIDmode)
133 return plus_constant (op0, - INTVAL (op1));
135 return gen_rtx_fmt_ee (code, mode, op0, op1);
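/* Editorial usage sketch (not from the original sources): assuming `reg' is
   a valid SImode REG rtx, a caller can write
     simplify_gen_binary (PLUS, SImode, reg, GEN_INT (4));
   and the CONST_INT special case above routes this through plus_constant,
   yielding (plus:SI reg (const_int 4)).  */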
138 /* Make a unary operation by first seeing if it folds and otherwise making
139 the specified operation. */
142 simplify_gen_unary (code, mode, op, op_mode)
144 enum machine_mode mode;
146 enum machine_mode op_mode;
150 /* If this simplifies, use it. */
151 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
154 return gen_rtx_fmt_e (code, mode, op);
157 /* Likewise for ternary operations. */
160 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
162 enum machine_mode mode, op0_mode;
167 /* If this simplifies, use it. */
168 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
172 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
175 /* Likewise, for relational operations.
176 CMP_MODE specifies the mode in which the comparison is done. */
180 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
182 enum machine_mode mode;
183 enum machine_mode cmp_mode;
188 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
191 /* Put complex operands first and constants second. */
192 if (swap_commutative_operands_p (op0, op1))
193 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
195 return gen_rtx_fmt_ee (code, mode, op0, op1);
198 /* Replace all occurrences of OLD in X with NEW and try to simplify the
199 resulting RTX. Return a new RTX which is as simplified as possible. */
202 simplify_replace_rtx (x, old, new)
207 enum rtx_code code = GET_CODE (x);
208 enum machine_mode mode = GET_MODE (x);
210 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
211 to build a new expression substituting recursively. If we can't do
212 anything, return our input. */
217 switch (GET_RTX_CLASS (code))
221 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
222 rtx op = (XEXP (x, 0) == old
223 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
225 return simplify_gen_unary (code, mode, op, op_mode);
231 simplify_gen_binary (code, mode,
232 simplify_replace_rtx (XEXP (x, 0), old, new),
233 simplify_replace_rtx (XEXP (x, 1), old, new));
236 simplify_gen_relational (code, mode,
237 (GET_MODE (XEXP (x, 0)) != VOIDmode
238 ? GET_MODE (XEXP (x, 0))
239 : GET_MODE (XEXP (x, 1))),
240 simplify_replace_rtx (XEXP (x, 0), old, new),
241 simplify_replace_rtx (XEXP (x, 1), old, new));
246 simplify_gen_ternary (code, mode, GET_MODE (XEXP (x, 0)),
247 simplify_replace_rtx (XEXP (x, 0), old, new),
248 simplify_replace_rtx (XEXP (x, 1), old, new),
249 simplify_replace_rtx (XEXP (x, 2), old, new));
252 /* The only case we try to handle is a SUBREG. */
256 exp = simplify_gen_subreg (GET_MODE (x),
257 simplify_replace_rtx (SUBREG_REG (x),
259 GET_MODE (SUBREG_REG (x)),
267 if (GET_CODE (x) == MEM)
269 replace_equiv_address_nv (x,
270 simplify_replace_rtx (XEXP (x, 0),
278 /* Try to simplify a unary operation CODE whose output mode is to be
279 MODE with input operand OP whose mode was originally OP_MODE.
280 Return zero if no simplification can be made. */
283 simplify_unary_operation (code, mode, op, op_mode)
285 enum machine_mode mode;
287 enum machine_mode op_mode;
289 unsigned int width = GET_MODE_BITSIZE (mode);
291 /* The order of these tests is critical so that, for example, we don't
292 check the wrong mode (input vs. output) for a conversion operation,
293 such as FIX. At some point, this should be simplified. */
295 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
297 if (code == FLOAT && GET_MODE (op) == VOIDmode
298 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
300 HOST_WIDE_INT hv, lv;
303 if (GET_CODE (op) == CONST_INT)
304 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
306 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
308 #ifdef REAL_ARITHMETIC
309 REAL_VALUE_FROM_INT (d, lv, hv, mode);
314 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
315 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
316 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
322 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
323 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
324 d += (double) (unsigned HOST_WIDE_INT) lv;
326 #endif /* REAL_ARITHMETIC */
327 d = real_value_truncate (mode, d);
328 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
330 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
331 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
333 HOST_WIDE_INT hv, lv;
336 if (GET_CODE (op) == CONST_INT)
337 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
339 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
341 if (op_mode == VOIDmode)
343 /* We don't know how to interpret negative-looking numbers in
344 this case, so don't try to fold those. */
348 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
351 hv = 0, lv &= GET_MODE_MASK (op_mode);
353 #ifdef REAL_ARITHMETIC
354 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
357 d = (double) (unsigned HOST_WIDE_INT) hv;
358 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
359 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
360 d += (double) (unsigned HOST_WIDE_INT) lv;
361 #endif /* REAL_ARITHMETIC */
362 d = real_value_truncate (mode, d);
363 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
367 if (GET_CODE (op) == CONST_INT
368 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
370 register HOST_WIDE_INT arg0 = INTVAL (op);
371 register HOST_WIDE_INT val;
384 val = (arg0 >= 0 ? arg0 : - arg0);
388 /* Don't use ffs here. Instead, get low order bit and then its
389 number. If arg0 is zero, this will return 0, as desired. */
390 arg0 &= GET_MODE_MASK (mode);
391 val = exact_log2 (arg0 & (- arg0)) + 1;
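/* Worked example (editorial): for arg0 = 12 (binary 1100), arg0 & -arg0
   isolates the lowest set bit, giving 4; exact_log2 (4) is 2, so val is 3,
   matching ffs: the lowest set bit of 12 is bit number 3 (1-based).  */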
399 if (op_mode == VOIDmode)
401 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
403 /* If we were really extending the mode,
404 we would have to distinguish between zero-extension
405 and sign-extension. */
406 if (width != GET_MODE_BITSIZE (op_mode))
410 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
411 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
417 if (op_mode == VOIDmode)
419 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
421 /* If we were really extending the mode,
422 we would have to distinguish between zero-extension
423 and sign-extension. */
424 if (width != GET_MODE_BITSIZE (op_mode))
428 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
431 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
433 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
434 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
449 val = trunc_int_for_mode (val, mode);
451 return GEN_INT (val);
454 /* We can do some operations on integer CONST_DOUBLEs. Also allow
455 for a DImode operation on a CONST_INT. */
456 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
457 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
459 unsigned HOST_WIDE_INT l1, lv;
460 HOST_WIDE_INT h1, hv;
462 if (GET_CODE (op) == CONST_DOUBLE)
463 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
465 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
475 neg_double (l1, h1, &lv, &hv);
480 neg_double (l1, h1, &lv, &hv);
488 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
490 lv = exact_log2 (l1 & (-l1)) + 1;
494 /* This is just a change-of-mode, so do nothing. */
499 if (op_mode == VOIDmode
500 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
504 lv = l1 & GET_MODE_MASK (op_mode);
508 if (op_mode == VOIDmode
509 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
513 lv = l1 & GET_MODE_MASK (op_mode);
514 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
515 && (lv & ((HOST_WIDE_INT) 1
516 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
517 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
519 hv = HWI_SIGN_EXTEND (lv);
530 return immed_double_const (lv, hv, mode);
533 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
534 else if (GET_CODE (op) == CONST_DOUBLE
535 && GET_MODE_CLASS (mode) == MODE_FLOAT)
541 if (setjmp (handler))
542 /* There used to be a warning here, but that is inadvisable.
543 People may want to cause traps, and the natural way
544 to do it should not get a warning. */
547 set_float_handler (handler);
549 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
554 d = REAL_VALUE_NEGATE (d);
558 if (REAL_VALUE_NEGATIVE (d))
559 d = REAL_VALUE_NEGATE (d);
563 d = real_value_truncate (mode, d);
567 /* All this does is change the mode. */
571 d = REAL_VALUE_RNDZINT (d);
575 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
585 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
586 set_float_handler (NULL);
590 else if (GET_CODE (op) == CONST_DOUBLE
591 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
592 && GET_MODE_CLASS (mode) == MODE_INT
593 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
599 if (setjmp (handler))
602 set_float_handler (handler);
604 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
609 val = REAL_VALUE_FIX (d);
613 val = REAL_VALUE_UNSIGNED_FIX (d);
620 set_float_handler (NULL);
622 val = trunc_int_for_mode (val, mode);
624 return GEN_INT (val);
627 /* This was formerly used only for non-IEEE float.
628 eggert@twinsun.com says it is safe for IEEE also. */
631 enum rtx_code reversed;
632 /* There are some simplifications we can do even if the operands aren't constant. */
637 /* (not (not X)) == X. */
638 if (GET_CODE (op) == NOT)
641 /* (not (eq X Y)) == (ne X Y), etc. */
642 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
643 && ((reversed = reversed_comparison_code (op, NULL_RTX))
645 return gen_rtx_fmt_ee (reversed,
646 op_mode, XEXP (op, 0), XEXP (op, 1));
650 /* (neg (neg X)) == X. */
651 if (GET_CODE (op) == NEG)
656 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
657 becomes just the MINUS if its mode is MODE. This allows
658 folding switch statements on machines using casesi (such as the VAX). */
660 if (GET_CODE (op) == TRUNCATE
661 && GET_MODE (XEXP (op, 0)) == mode
662 && GET_CODE (XEXP (op, 0)) == MINUS
663 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
664 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
667 #ifdef POINTERS_EXTEND_UNSIGNED
668 if (! POINTERS_EXTEND_UNSIGNED
669 && mode == Pmode && GET_MODE (op) == ptr_mode
671 || (GET_CODE (op) == SUBREG
672 && GET_CODE (SUBREG_REG (op)) == REG
673 && REG_POINTER (SUBREG_REG (op))
674 && GET_MODE (SUBREG_REG (op)) == Pmode)))
675 return convert_memory_address (Pmode, op);
679 #ifdef POINTERS_EXTEND_UNSIGNED
681 if (POINTERS_EXTEND_UNSIGNED
682 && mode == Pmode && GET_MODE (op) == ptr_mode
684 || (GET_CODE (op) == SUBREG
685 && GET_CODE (SUBREG_REG (op)) == REG
686 && REG_POINTER (SUBREG_REG (op))
687 && GET_MODE (SUBREG_REG (op)) == Pmode)))
688 return convert_memory_address (Pmode, op);
700 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
701 and OP1. Return 0 if no simplification is possible.
703 Don't use this for relational operations such as EQ or LT.
704 Use simplify_relational_operation instead. */
707 simplify_binary_operation (code, mode, op0, op1)
709 enum machine_mode mode;
712 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
714 unsigned int width = GET_MODE_BITSIZE (mode);
717 /* Relational operations don't work here. We must know the mode
718 of the operands in order to do the comparison correctly.
719 Assuming a full word can give incorrect results.
720 Consider comparing 128 with -128 in QImode. */
722 if (GET_RTX_CLASS (code) == '<')
725 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
726 if (GET_MODE_CLASS (mode) == MODE_FLOAT
727 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
728 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
730 REAL_VALUE_TYPE f0, f1, value;
733 if (setjmp (handler))
736 set_float_handler (handler);
738 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
739 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
740 f0 = real_value_truncate (mode, f0);
741 f1 = real_value_truncate (mode, f1);
743 #ifdef REAL_ARITHMETIC
744 #ifndef REAL_INFINITY
745 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
748 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
762 #ifndef REAL_INFINITY
769 value = MIN (f0, f1);
772 value = MAX (f0, f1);
779 value = real_value_truncate (mode, value);
780 set_float_handler (NULL);
781 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
783 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
785 /* We can fold some multi-word operations. */
786 if (GET_MODE_CLASS (mode) == MODE_INT
787 && width == HOST_BITS_PER_WIDE_INT * 2
788 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
789 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
791 unsigned HOST_WIDE_INT l1, l2, lv;
792 HOST_WIDE_INT h1, h2, hv;
794 if (GET_CODE (op0) == CONST_DOUBLE)
795 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
797 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
799 if (GET_CODE (op1) == CONST_DOUBLE)
800 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
802 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
807 /* A - B == A + (-B). */
808 neg_double (l2, h2, &lv, &hv);
811 /* ... fall through ... */
814 add_double (l1, h1, l2, h2, &lv, &hv);
818 mul_double (l1, h1, l2, h2, &lv, &hv);
821 case DIV: case MOD: case UDIV: case UMOD:
822 /* We'd need to include tree.h to do this and it doesn't seem worth it. */
827 lv = l1 & l2, hv = h1 & h2;
831 lv = l1 | l2, hv = h1 | h2;
835 lv = l1 ^ l2, hv = h1 ^ h2;
841 && ((unsigned HOST_WIDE_INT) l1
842 < (unsigned HOST_WIDE_INT) l2)))
851 && ((unsigned HOST_WIDE_INT) l1
852 > (unsigned HOST_WIDE_INT) l2)))
859 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
861 && ((unsigned HOST_WIDE_INT) l1
862 < (unsigned HOST_WIDE_INT) l2)))
869 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
871 && ((unsigned HOST_WIDE_INT) l1
872 > (unsigned HOST_WIDE_INT) l2)))
878 case LSHIFTRT: case ASHIFTRT:
880 case ROTATE: case ROTATERT:
881 #ifdef SHIFT_COUNT_TRUNCATED
882 if (SHIFT_COUNT_TRUNCATED)
883 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
886 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
889 if (code == LSHIFTRT || code == ASHIFTRT)
890 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
892 else if (code == ASHIFT)
893 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
894 else if (code == ROTATE)
895 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
896 else /* code == ROTATERT */
897 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
904 return immed_double_const (lv, hv, mode);
907 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
908 || width > HOST_BITS_PER_WIDE_INT || width == 0)
910 /* Even if we can't compute a constant result,
911 there are some cases worth simplifying. */
916 /* In IEEE floating point, x+0 is not the same as x. Similarly
917 for the other optimizations below. */
918 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
919 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
922 if (op1 == CONST0_RTX (mode))
925 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
926 if (GET_CODE (op0) == NEG)
927 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
928 else if (GET_CODE (op1) == NEG)
929 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
932 if (INTEGRAL_MODE_P (mode)
933 && GET_CODE (op0) == NOT
934 && GET_CODE (op1) == CONST_INT
935 && INTVAL (op1) == 1)
936 return gen_rtx_NEG (mode, XEXP (op0, 0));
938 /* Handle both-operands-constant cases. We can only add
939 CONST_INTs to constants since the sum of relocatable symbols
940 can't be handled by most assemblers. Don't add CONST_INT
941 to CONST_INT since overflow won't be computed properly if wider
942 than HOST_BITS_PER_WIDE_INT. */
944 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
945 && GET_CODE (op1) == CONST_INT)
946 return plus_constant (op0, INTVAL (op1));
947 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
948 && GET_CODE (op0) == CONST_INT)
949 return plus_constant (op1, INTVAL (op0));
951 /* See if this is something like X * C - X or vice versa or
952 if the multiplication is written as a shift. If so, we can
953 distribute and make a new multiply, shift, or maybe just
954 have X (if C is 2 in the example above). But don't make a
955 real multiply if we didn't have one before. */
957 if (! FLOAT_MODE_P (mode))
959 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
960 rtx lhs = op0, rhs = op1;
963 if (GET_CODE (lhs) == NEG)
964 coeff0 = -1, lhs = XEXP (lhs, 0);
965 else if (GET_CODE (lhs) == MULT
966 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
968 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
971 else if (GET_CODE (lhs) == ASHIFT
972 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
973 && INTVAL (XEXP (lhs, 1)) >= 0
974 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
976 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
980 if (GET_CODE (rhs) == NEG)
981 coeff1 = -1, rhs = XEXP (rhs, 0);
982 else if (GET_CODE (rhs) == MULT
983 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
985 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
988 else if (GET_CODE (rhs) == ASHIFT
989 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
990 && INTVAL (XEXP (rhs, 1)) >= 0
991 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
993 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
997 if (rtx_equal_p (lhs, rhs))
999 tem = simplify_gen_binary (MULT, mode, lhs,
1000 GEN_INT (coeff0 + coeff1));
1001 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
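/* Worked example (editorial): for (plus (mult x (const_int 4)) x) we get
   coeff0 = 4 and coeff1 = 1 with both sides reducing to x, so the result is
   (mult x (const_int 5)); had_mult is set by the original MULT, so the new
   multiply is accepted.  */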
1005 /* If one of the operands is a PLUS or a MINUS, see if we can
1006 simplify this by the associative law.
1007 Don't use the associative law for floating point.
1008 The inaccuracy makes it nonassociative,
1009 and subtle programs can break if operations are associated. */
1011 if (INTEGRAL_MODE_P (mode)
1012 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1013 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1014 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1020 /* Convert (compare FOO (const_int 0)) to FOO when we are using cc0;
1021 when cc0 is not in use, we want to leave it as a COMPARE
1022 so we can distinguish it from a register-register copy.
1024 In IEEE floating point, x-0 is not the same as x. */
1026 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1027 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1028 && op1 == CONST0_RTX (mode))
1032 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1033 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1034 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1035 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1037 rtx xop00 = XEXP (op0, 0);
1038 rtx xop10 = XEXP (op1, 0);
1041 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1043 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1044 && GET_MODE (xop00) == GET_MODE (xop10)
1045 && REGNO (xop00) == REGNO (xop10)
1046 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1047 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1054 /* None of these optimizations can be done for IEEE floating point. */
1056 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1057 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1060 /* We can't assume x-x is 0 even with non-IEEE floating point,
1061 but since it is zero except in very strange circumstances, we
1062 will treat it as zero with -funsafe-math-optimizations. */
1063 if (rtx_equal_p (op0, op1)
1064 && ! side_effects_p (op0)
1065 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1066 return CONST0_RTX (mode);
1068 /* Change subtraction from zero into negation. */
1069 if (op0 == CONST0_RTX (mode))
1070 return gen_rtx_NEG (mode, op1);
1072 /* (-1 - a) is ~a. */
1073 if (op0 == constm1_rtx)
1074 return gen_rtx_NOT (mode, op1);
1076 /* Subtracting 0 has no effect. */
1077 if (op1 == CONST0_RTX (mode))
1080 /* See if this is something like X * C - X or vice versa or
1081 if the multiplication is written as a shift. If so, we can
1082 distribute and make a new multiply, shift, or maybe just
1083 have X (if C is 2 in the example above). But don't make a
1084 real multiply if we didn't have one before. */
1086 if (! FLOAT_MODE_P (mode))
1088 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1089 rtx lhs = op0, rhs = op1;
1092 if (GET_CODE (lhs) == NEG)
1093 coeff0 = -1, lhs = XEXP (lhs, 0);
1094 else if (GET_CODE (lhs) == MULT
1095 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1097 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1100 else if (GET_CODE (lhs) == ASHIFT
1101 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1102 && INTVAL (XEXP (lhs, 1)) >= 0
1103 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1105 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1106 lhs = XEXP (lhs, 0);
1109 if (GET_CODE (rhs) == NEG)
1110 coeff1 = - 1, rhs = XEXP (rhs, 0);
1111 else if (GET_CODE (rhs) == MULT
1112 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1114 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1117 else if (GET_CODE (rhs) == ASHIFT
1118 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1119 && INTVAL (XEXP (rhs, 1)) >= 0
1120 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1122 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1123 rhs = XEXP (rhs, 0);
1126 if (rtx_equal_p (lhs, rhs))
1128 tem = simplify_gen_binary (MULT, mode, lhs,
1129 GEN_INT (coeff0 - coeff1));
1130 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1134 /* (a - (-b)) -> (a + b). */
1135 if (GET_CODE (op1) == NEG)
1136 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1138 /* If one of the operands is a PLUS or a MINUS, see if we can
1139 simplify this by the associative law.
1140 Don't use the associative law for floating point.
1141 The inaccuracy makes it nonassociative,
1142 and subtle programs can break if operations are associated. */
1144 if (INTEGRAL_MODE_P (mode)
1145 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1146 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1147 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1150 /* Don't let a relocatable value get a negative coeff. */
1151 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1152 return plus_constant (op0, - INTVAL (op1));
1154 /* (x - (x & y)) -> (x & ~y) */
1155 if (GET_CODE (op1) == AND)
1157 if (rtx_equal_p (op0, XEXP (op1, 0)))
1158 return simplify_gen_binary (AND, mode, op0,
1159 gen_rtx_NOT (mode, XEXP (op1, 1)));
1160 if (rtx_equal_p (op0, XEXP (op1, 1)))
1161 return simplify_gen_binary (AND, mode, op0,
1162 gen_rtx_NOT (mode, XEXP (op1, 0)));
1167 if (op1 == constm1_rtx)
1169 tem = simplify_unary_operation (NEG, mode, op0, mode);
1171 return tem ? tem : gen_rtx_NEG (mode, op0);
1174 /* In IEEE floating point, x*0 is not always 0. */
1175 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1176 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1177 && op1 == CONST0_RTX (mode)
1178 && ! side_effects_p (op0))
1181 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1182 However, ANSI says we can drop signals,
1183 so we can do this anyway. */
1184 if (op1 == CONST1_RTX (mode))
1187 /* Convert multiply by constant power of two into shift unless
1188 we are still generating RTL. This test is a kludge. */
1189 if (GET_CODE (op1) == CONST_INT
1190 && (val = exact_log2 (INTVAL (op1))) >= 0
1191 /* If the mode is larger than the host word size, and the
1192 uppermost bit is set, then this isn't a power of two due
1193 to implicit sign extension. */
1194 && (width <= HOST_BITS_PER_WIDE_INT
1195 || val != HOST_BITS_PER_WIDE_INT - 1)
1196 && ! rtx_equal_function_value_matters)
1197 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
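/* Worked example (editorial): multiplying by (const_int 8) gives
   exact_log2 (8) = 3, so x * 8 is rewritten as (ashift x (const_int 3)),
   i.e. x << 3.  */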
1199 if (GET_CODE (op1) == CONST_DOUBLE
1200 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
1204 int op1is2, op1ism1;
1206 if (setjmp (handler))
1209 set_float_handler (handler);
1210 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1211 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1212 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1213 set_float_handler (NULL);
1215 /* x*2 is x+x and x*(-1) is -x */
1216 if (op1is2 && GET_MODE (op0) == mode)
1217 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1219 else if (op1ism1 && GET_MODE (op0) == mode)
1220 return gen_rtx_NEG (mode, op0);
1225 if (op1 == const0_rtx)
1227 if (GET_CODE (op1) == CONST_INT
1228 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1230 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1232 /* A | (~A) -> -1 */
1233 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1234 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1235 && ! side_effects_p (op0)
1236 && GET_MODE_CLASS (mode) != MODE_CC)
1241 if (op1 == const0_rtx)
1243 if (GET_CODE (op1) == CONST_INT
1244 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1245 return gen_rtx_NOT (mode, op0);
1246 if (op0 == op1 && ! side_effects_p (op0)
1247 && GET_MODE_CLASS (mode) != MODE_CC)
1252 if (op1 == const0_rtx && ! side_effects_p (op0))
1254 if (GET_CODE (op1) == CONST_INT
1255 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1257 if (op0 == op1 && ! side_effects_p (op0)
1258 && GET_MODE_CLASS (mode) != MODE_CC)
1261 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1262 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1263 && ! side_effects_p (op0)
1264 && GET_MODE_CLASS (mode) != MODE_CC)
1269 /* Convert divide by power of two into shift (divide by 1 handled below). */
1271 if (GET_CODE (op1) == CONST_INT
1272 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
1273 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
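/* Worked example (editorial): an unsigned divide by (const_int 8) gives
   exact_log2 (8) = 3, so x / 8 becomes (lshiftrt x (const_int 3)),
   i.e. a logical right shift by 3.  */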
1275 /* ... fall through ... */
1278 if (op1 == CONST1_RTX (mode))
1281 /* In IEEE floating point, 0/x is not always 0. */
1282 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1283 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1284 && op0 == CONST0_RTX (mode)
1285 && ! side_effects_p (op1))
1288 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1289 /* Change division by a constant into multiplication. Only do
1290 this with -funsafe-math-optimizations. */
1291 else if (GET_CODE (op1) == CONST_DOUBLE
1292 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
1293 && op1 != CONST0_RTX (mode)
1294 && flag_unsafe_math_optimizations)
1297 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1299 if (! REAL_VALUES_EQUAL (d, dconst0))
1301 #if defined (REAL_ARITHMETIC)
1302 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1303 return gen_rtx_MULT (mode, op0,
1304 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1307 gen_rtx_MULT (mode, op0,
1308 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1316 /* Handle modulus by power of two (mod with 1 handled below). */
1317 if (GET_CODE (op1) == CONST_INT
1318 && exact_log2 (INTVAL (op1)) > 0)
1319 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
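/* Worked example (editorial): an unsigned modulus by (const_int 8) becomes
   (and x (const_int 7)), i.e. x % 8 is rewritten as x & 7.  */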
1321 /* ... fall through ... */
1324 if ((op0 == const0_rtx || op1 == const1_rtx)
1325 && ! side_effects_p (op0) && ! side_effects_p (op1))
1331 /* Rotating ~0 always results in ~0. */
1332 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1333 && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
1334 && ! side_effects_p (op1))
1337 /* ... fall through ... */
1342 if (op1 == const0_rtx)
1344 if (op0 == const0_rtx && ! side_effects_p (op1))
1349 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1350 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
1351 && ! side_effects_p (op0))
1353 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1358 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1359 && ((unsigned HOST_WIDE_INT) INTVAL (op1)
1360 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1361 && ! side_effects_p (op0))
1363 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1368 if (op1 == const0_rtx && ! side_effects_p (op0))
1370 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1375 if (op1 == constm1_rtx && ! side_effects_p (op0))
1377 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1388 /* Get the integer argument values in two forms:
1389 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1391 arg0 = INTVAL (op0);
1392 arg1 = INTVAL (op1);
1394 if (width < HOST_BITS_PER_WIDE_INT)
1396 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1397 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1400 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1401 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1404 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1405 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1413 /* Compute the value of the arithmetic. */
1418 val = arg0s + arg1s;
1422 val = arg0s - arg1s;
1426 val = arg0s * arg1s;
1431 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1434 val = arg0s / arg1s;
1439 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1442 val = arg0s % arg1s;
1447 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1450 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1455 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1458 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1474 /* If shift count is undefined, don't fold it; let the machine do
1475 what it wants. But truncate it if the machine will do that. */
1479 #ifdef SHIFT_COUNT_TRUNCATED
1480 if (SHIFT_COUNT_TRUNCATED)
1484 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1491 #ifdef SHIFT_COUNT_TRUNCATED
1492 if (SHIFT_COUNT_TRUNCATED)
1496 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1503 #ifdef SHIFT_COUNT_TRUNCATED
1504 if (SHIFT_COUNT_TRUNCATED)
1508 val = arg0s >> arg1;
1510 /* Bootstrap compiler may not have sign extended the right shift.
1511 Manually extend the sign to ensure bootstrap cc matches gcc. */
1512 if (arg0s < 0 && arg1 > 0)
1513 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1522 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1523 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1531 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1532 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1536 /* Do nothing here. */
1540 val = arg0s <= arg1s ? arg0s : arg1s;
1544 val = ((unsigned HOST_WIDE_INT) arg0
1545 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1549 val = arg0s > arg1s ? arg0s : arg1s;
1553 val = ((unsigned HOST_WIDE_INT) arg0
1554 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1561 val = trunc_int_for_mode (val, mode);
1563 return GEN_INT (val);
1566 /* Simplify a PLUS or MINUS, at least one of whose operands may be another PLUS or MINUS.
1569 Rather than test for specific cases, we do this by a brute-force method
1570 and do all possible simplifications until no more changes occur. Then
1571 we rebuild the operation. */
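/* Editorial example (not from the original comment): given
   (minus (plus a (const_int 5)) a), the expansion below produces the operand
   list {a, 5, -a}; the pairwise passes cancel a against -a and fold the
   remaining constants, so the whole expression collapses to (const_int 5).  */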
1574 simplify_plus_minus (code, mode, op0, op1)
1576 enum machine_mode mode;
1582 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
1583 int first = 1, negate = 0, changed;
1586 memset ((char *) ops, 0, sizeof ops);
1588 /* Set up the two operands and then expand them until nothing has been
1589 changed. If we run out of room in our array, give up; this should
1590 almost never happen. */
1592 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
1599 for (i = 0; i < n_ops; i++)
1600 switch (GET_CODE (ops[i]))
1607 ops[n_ops] = XEXP (ops[i], 1);
1608 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
1609 ops[i] = XEXP (ops[i], 0);
1615 ops[i] = XEXP (ops[i], 0);
1616 negs[i] = ! negs[i];
1621 ops[i] = XEXP (ops[i], 0);
1627 /* ~a -> (-a - 1) */
1630 ops[n_ops] = constm1_rtx;
1631 negs[n_ops++] = negs[i];
1632 ops[i] = XEXP (ops[i], 0);
1633 negs[i] = ! negs[i];
1640 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
1648 /* If we only have two operands, we can't do anything. */
1652 /* Now simplify each pair of operands until nothing changes. The first
1653 time through just simplify constants against each other. */
1660 for (i = 0; i < n_ops - 1; i++)
1661 for (j = i + 1; j < n_ops; j++)
1662 if (ops[i] != 0 && ops[j] != 0
1663 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
1665 rtx lhs = ops[i], rhs = ops[j];
1666 enum rtx_code ncode = PLUS;
1668 if (negs[i] && ! negs[j])
1669 lhs = ops[j], rhs = ops[i], ncode = MINUS;
1670 else if (! negs[i] && negs[j])
1673 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1676 ops[i] = tem, ops[j] = 0;
1677 negs[i] = negs[i] && negs[j];
1678 if (GET_CODE (tem) == NEG)
1679 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
1681 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
1682 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
1690 /* Pack all the operands to the lower-numbered entries and give up if
1691 we didn't reduce the number of operands we had. Make sure we
1692 count a CONST as two operands. If we have the same number of
1693 operands, but have made more CONSTs than we had, this is also
1694 an improvement, so accept it. */
1696 for (i = 0, j = 0; j < n_ops; j++)
1699 ops[i] = ops[j], negs[i++] = negs[j];
1700 if (GET_CODE (ops[j]) == CONST)
1704 if (i + n_consts > input_ops
1705 || (i + n_consts == input_ops && n_consts <= input_consts))
1710 /* If we have a CONST_INT, put it last. */
1711 for (i = 0; i < n_ops - 1; i++)
1712 if (GET_CODE (ops[i]) == CONST_INT)
1714 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
1715 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
1718 /* Put a non-negated operand first. If there aren't any, make all
1719 operands positive and negate the whole thing later. */
1720 for (i = 0; i < n_ops && negs[i]; i++)
1725 for (i = 0; i < n_ops; i++)
1731 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
1732 j = negs[0], negs[0] = negs[i], negs[i] = j;
1735 /* Now make the result by performing the requested operations. */
1737 for (i = 1; i < n_ops; i++)
1738 result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
1740 return negate ? gen_rtx_NEG (mode, result) : result;
1745 rtx op0, op1; /* Input */
1746 int equal, op0lt, op1lt; /* Output */
1751 check_fold_consts (data)
1754 struct cfc_args *args = (struct cfc_args *) data;
1755 REAL_VALUE_TYPE d0, d1;
1757 /* We may possibly raise an exception while reading the value. */
1758 args->unordered = 1;
1759 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1760 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1762 /* Comparisons of Inf versus Inf are ordered. */
1763 if (REAL_VALUE_ISNAN (d0)
1764 || REAL_VALUE_ISNAN (d1))
1766 args->equal = REAL_VALUES_EQUAL (d0, d1);
1767 args->op0lt = REAL_VALUES_LESS (d0, d1);
1768 args->op1lt = REAL_VALUES_LESS (d1, d0);
1769 args->unordered = 0;
1772 /* Like simplify_binary_operation except used for relational operators.
1773 MODE is the mode of the operands, not that of the result. If MODE
1774 is VOIDmode, both operands must also be VOIDmode and we compare the
1775 operands in "infinite precision".
1777 If no simplification is possible, this function returns zero. Otherwise,
1778 it returns either const_true_rtx or const0_rtx. */
1781 simplify_relational_operation (code, mode, op0, op1)
1783 enum machine_mode mode;
1786 int equal, op0lt, op0ltu, op1lt, op1ltu;
1789 if (mode == VOIDmode
1790 && (GET_MODE (op0) != VOIDmode
1791 || GET_MODE (op1) != VOIDmode))
1794 /* If op0 is a compare, extract the comparison arguments from it. */
1795 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1796 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1798 /* We can't simplify MODE_CC values since we don't know what the
1799 actual comparison is. */
1800 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1807 /* Make sure the constant is second. */
1808 if (swap_commutative_operands_p (op0, op1))
1810 tem = op0, op0 = op1, op1 = tem;
1811 code = swap_condition (code);
1814 /* For integer comparisons of A and B, maybe we can simplify A - B and
1815 then simplify a comparison of that with zero. If A and B are both either
1816 a register or a CONST_INT, this can't help; testing for these cases will
1817 prevent infinite recursion here and speed things up.
1819 If CODE is an unsigned comparison, then we can never do this optimization,
1820 because it gives an incorrect result if the subtraction wraps around zero.
1821 ANSI C defines unsigned operations such that they never overflow, and
1822 thus such cases cannot be ignored. */
1824 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
1825 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
1826 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
1827 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1828 && code != GTU && code != GEU && code != LTU && code != LEU)
1829 return simplify_relational_operation (signed_condition (code),
1830 mode, tem, const0_rtx);
1832 if (flag_unsafe_math_optimizations && code == ORDERED)
1833 return const_true_rtx;
1835 if (flag_unsafe_math_optimizations && code == UNORDERED)
1838 /* For non-IEEE floating-point, if the two operands are equal, we know the result. */
1840 if (rtx_equal_p (op0, op1)
1841 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1842 || ! FLOAT_MODE_P (GET_MODE (op0))
1843 || flag_unsafe_math_optimizations))
1844 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1846 /* If the operands are floating-point constants, see if we can fold the result. */
1848 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1849 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
1850 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
1852 struct cfc_args args;
1854 /* Setup input for check_fold_consts() */
1859 if (!do_float_handler (check_fold_consts, (PTR) &args))
1872 return const_true_rtx;
1885 /* Receive output from check_fold_consts() */
1887 op0lt = op0ltu = args.op0lt;
1888 op1lt = op1ltu = args.op1lt;
1890 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1892 /* Otherwise, see if the operands are both integers. */
1893 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1894 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
1895 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
1897 int width = GET_MODE_BITSIZE (mode);
1898 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1899 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1901 /* Get the two words comprising each integer constant. */
1902 if (GET_CODE (op0) == CONST_DOUBLE)
1904 l0u = l0s = CONST_DOUBLE_LOW (op0);
1905 h0u = h0s = CONST_DOUBLE_HIGH (op0);
1909 l0u = l0s = INTVAL (op0);
1910 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1913 if (GET_CODE (op1) == CONST_DOUBLE)
1915 l1u = l1s = CONST_DOUBLE_LOW (op1);
1916 h1u = h1s = CONST_DOUBLE_HIGH (op1);
1920 l1u = l1s = INTVAL (op1);
1921 h1u = h1s = HWI_SIGN_EXTEND (l1s);
1924 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1925 we have to sign or zero-extend the values. */
1926 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1928 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1929 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
1931 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1932 l0s |= ((HOST_WIDE_INT) (-1) << width);
1934 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1935 l1s |= ((HOST_WIDE_INT) (-1) << width);
1937 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
1938 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
1940 equal = (h0u == h1u && l0u == l1u);
1941 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
1942 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
1943 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
1944 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
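/* Worked example (editorial): comparing (const_int 128) with (const_int -128)
   in QImode, the masking above yields l0u = l1u = 0x80 and the sign extension
   yields l0s = l1s = -128, so EQUAL is set -- exactly the 128 vs. -128 case
   the comment in simplify_binary_operation warns about.  */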
1947 /* Otherwise, there are some code-specific tests we can make. */
1953 /* References to the frame plus a constant or labels cannot
1954 be zero, but a SYMBOL_REF can due to #pragma weak. */
1955 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1956 || GET_CODE (op0) == LABEL_REF)
1957 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1958 /* On some machines, the ap reg can be 0 sometimes. */
1959 && op0 != arg_pointer_rtx
1966 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1967 || GET_CODE (op0) == LABEL_REF)
1968 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1969 && op0 != arg_pointer_rtx
1972 return const_true_rtx;
1976 /* Unsigned values are never negative. */
1977 if (op1 == const0_rtx)
1978 return const_true_rtx;
1982 if (op1 == const0_rtx)
1987 /* Unsigned values are never greater than the largest representable number. */
1989 if (GET_CODE (op1) == CONST_INT
1990 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
1991 && INTEGRAL_MODE_P (mode))
1992 return const_true_rtx;
1996 if (GET_CODE (op1) == CONST_INT
1997 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
1998 && INTEGRAL_MODE_P (mode))
2009 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set as appropriate. */
2015 return equal ? const_true_rtx : const0_rtx;
2018 return ! equal ? const_true_rtx : const0_rtx;
2021 return op0lt ? const_true_rtx : const0_rtx;
2024 return op1lt ? const_true_rtx : const0_rtx;
2026 return op0ltu ? const_true_rtx : const0_rtx;
2028 return op1ltu ? const_true_rtx : const0_rtx;
2031 return equal || op0lt ? const_true_rtx : const0_rtx;
2034 return equal || op1lt ? const_true_rtx : const0_rtx;
2036 return equal || op0ltu ? const_true_rtx : const0_rtx;
2038 return equal || op1ltu ? const_true_rtx : const0_rtx;
2040 return const_true_rtx;
2048 /* Simplify CODE, an operation with result mode MODE and three operands,
2049 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2050 a constant. Return 0 if no simplification is possible. */
2053 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2055 enum machine_mode mode, op0_mode;
2058 unsigned int width = GET_MODE_BITSIZE (mode);
2060 /* VOIDmode means "infinite" precision. */
2062 width = HOST_BITS_PER_WIDE_INT;
2068 if (GET_CODE (op0) == CONST_INT
2069 && GET_CODE (op1) == CONST_INT
2070 && GET_CODE (op2) == CONST_INT
2071 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2072 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2074 /* Extracting a bit-field from a constant */
2075 HOST_WIDE_INT val = INTVAL (op0);
2077 if (BITS_BIG_ENDIAN)
2078 val >>= (GET_MODE_BITSIZE (op0_mode)
2079 - INTVAL (op2) - INTVAL (op1));
2081 val >>= INTVAL (op2);
2083 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2085 /* First zero-extend. */
2086 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2087 /* If desired, propagate sign bit. */
2088 if (code == SIGN_EXTRACT
2089 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2090 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2093 /* Clear the bits that don't belong in our mode,
2094 unless they and our sign bit are all one.
2095 So we get either a reasonable negative value or a reasonable
2096 unsigned value for this mode. */
2097 if (width < HOST_BITS_PER_WIDE_INT
2098 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2099 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2100 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2102 return GEN_INT (val);
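/* Worked example (editorial): with BITS_BIG_ENDIAN clear,
   (zero_extract (const_int 0x36) (const_int 3) (const_int 1)) shifts 0x36
   right by 1 and masks with 0x7, giving (const_int 3); SIGN_EXTRACT would
   additionally propagate the top bit of that 3-bit field if it were set.  */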
2107 if (GET_CODE (op0) == CONST_INT)
2108 return op0 != const0_rtx ? op1 : op2;
2110 /* Convert a == b ? b : a to "a". */
2111 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2112 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2113 && rtx_equal_p (XEXP (op0, 0), op1)
2114 && rtx_equal_p (XEXP (op0, 1), op2))
2116 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2117 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2118 && rtx_equal_p (XEXP (op0, 1), op1)
2119 && rtx_equal_p (XEXP (op0, 0), op2))
2121 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2123 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2124 ? GET_MODE (XEXP (op0, 1))
2125 : GET_MODE (XEXP (op0, 0)));
2127 if (cmp_mode == VOIDmode)
2128 cmp_mode = op0_mode;
2129 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2130 XEXP (op0, 0), XEXP (op0, 1));
2132 /* See if any simplifications were possible. */
2133 if (temp == const0_rtx)
2135 else if (temp == const1_rtx)
2140 /* Look for happy constants in op1 and op2. */
2141 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2143 HOST_WIDE_INT t = INTVAL (op1);
2144 HOST_WIDE_INT f = INTVAL (op2);
2146 if (t == STORE_FLAG_VALUE && f == 0)
2147 code = GET_CODE (op0);
2148 else if (t == 0 && f == STORE_FLAG_VALUE)
2151 tmp = reversed_comparison_code (op0, NULL_RTX);
2159 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2171 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2172 Return 0 if no simplification is possible. */
2174 simplify_subreg (outermode, op, innermode, byte)
2177 enum machine_mode outermode, innermode;
2179 /* Little bit of sanity checking. */
2180 if (innermode == VOIDmode || outermode == VOIDmode
2181 || innermode == BLKmode || outermode == BLKmode)
2184 if (GET_MODE (op) != innermode
2185 && GET_MODE (op) != VOIDmode)
2188 if (byte % GET_MODE_SIZE (outermode)
2189 || byte >= GET_MODE_SIZE (innermode))
2192 if (outermode == innermode && !byte)
2195 /* Attempt to simplify constant to non-SUBREG expression. */
2196 if (CONSTANT_P (op))
2199 unsigned HOST_WIDE_INT val = 0;
2201 /* ??? This code is partly redundant with the code below, but can handle
2202 the subregs of floats and similar corner cases.
2203 Later we should move all simplification code here and rewrite
2204 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2205 using SIMPLIFY_SUBREG. */
2206 if (subreg_lowpart_offset (outermode, innermode) == byte)
2208 rtx new = gen_lowpart_if_possible (outermode, op);
2213 /* A similar comment to the one above applies here. */
2214 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2215 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2216 && GET_MODE_CLASS (outermode) == MODE_INT)
2218 rtx new = constant_subword (op,
2219 (byte / UNITS_PER_WORD),
2225 offset = byte * BITS_PER_UNIT;
2226 switch (GET_CODE (op))
2229 if (GET_MODE (op) != VOIDmode)
2232 /* We can't handle this case yet. */
2233 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2236 part = offset >= HOST_BITS_PER_WIDE_INT;
2237 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2238 && BYTES_BIG_ENDIAN)
2239 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2240 && WORDS_BIG_ENDIAN))
2242 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2243 offset %= HOST_BITS_PER_WIDE_INT;
2245 /* We've already picked the word we want from a double, so
2246 pretend this is actually an integer. */
2247 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2251 if (GET_CODE (op) == CONST_INT)
2254 /* We don't handle synthesizing non-integral constants yet. */
2255 if (GET_MODE_CLASS (outermode) != MODE_INT)
2258 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2260 if (WORDS_BIG_ENDIAN)
2261 offset = (GET_MODE_BITSIZE (innermode)
2262 - GET_MODE_BITSIZE (outermode) - offset);
2263 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2264 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2265 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2266 - 2 * (offset % BITS_PER_WORD));
2269 if (offset >= HOST_BITS_PER_WIDE_INT)
2270 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2274 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2275 val = trunc_int_for_mode (val, outermode);
2276 return GEN_INT (val);
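/* Worked example (editorial): on a little-endian target,
   (subreg:QI (const_int 0x1234) 0) reaches this point with offset 0, so val
   is 0x1234 truncated to QImode, i.e. (const_int 0x34); byte 1 would select
   0x12 instead.  */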
2283 /* Changing mode twice with SUBREG => just change it once,
2284 or not at all if changing back to the operand's starting mode. */
2285 if (GET_CODE (op) == SUBREG)
2287 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2288 int final_offset = byte + SUBREG_BYTE (op);
2291 if (outermode == innermostmode
2292 && byte == 0 && SUBREG_BYTE (op) == 0)
2293 return SUBREG_REG (op);
2295 /* The SUBREG_BYTE represents the offset, as if the value were stored
2296 in memory. The irritating exception is the paradoxical subreg, where
2297 we define SUBREG_BYTE to be 0; on big endian machines, this
2298 value should be negative. For a moment, undo this exception. */
2299 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2301 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2302 if (WORDS_BIG_ENDIAN)
2303 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2304 if (BYTES_BIG_ENDIAN)
2305 final_offset += difference % UNITS_PER_WORD;
2307 if (SUBREG_BYTE (op) == 0
2308 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2310 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2311 if (WORDS_BIG_ENDIAN)
2312 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2313 if (BYTES_BIG_ENDIAN)
2314 final_offset += difference % UNITS_PER_WORD;
2317 /* See whether resulting subreg will be paradoxical. */
2318 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2320 /* In nonparadoxical subregs we can't handle negative offsets. */
2321 if (final_offset < 0)
2323 /* Bail out in case resulting subreg would be incorrect. */
2324 if (final_offset % GET_MODE_SIZE (outermode)
2325 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2331 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2333 /* In a paradoxical subreg, see if we are still looking at the lower part.
2334 If so, our SUBREG_BYTE will be 0. */
2335 if (WORDS_BIG_ENDIAN)
2336 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2337 if (BYTES_BIG_ENDIAN)
2338 offset += difference % UNITS_PER_WORD;
2339 if (offset == final_offset)
2345 /* Recurse for further possible simplifications. */
2346 new = simplify_subreg (outermode, SUBREG_REG (op),
2347 GET_MODE (SUBREG_REG (op)),
2351 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2354 /* SUBREG of a hard register => just change the register number
2355 and/or mode. If the hard register is not valid in that mode,
2356 suppress this simplification. If the hard register is the stack,
2357 frame, or argument pointer, leave this as a SUBREG. */
2360 && (! REG_FUNCTION_VALUE_P (op)
2361 || ! rtx_equal_function_value_matters)
2362 #ifdef CLASS_CANNOT_CHANGE_MODE
2363 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2364 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2365 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2366 && (TEST_HARD_REG_BIT
2367 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2370 && REGNO (op) < FIRST_PSEUDO_REGISTER
2371 && ((reload_completed && !frame_pointer_needed)
2372 || (REGNO (op) != FRAME_POINTER_REGNUM
2373 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2374 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2377 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2378 && REGNO (op) != ARG_POINTER_REGNUM
2380 && REGNO (op) != STACK_POINTER_REGNUM)
2382 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2385 /* ??? We do allow it if the current REG is not valid for
2386 its mode. This is a kludge to work around how float/complex
2387 arguments are passed on 32-bit Sparc and should be fixed. */
2388 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2389 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2390 return gen_rtx_REG (outermode, final_regno);
2393 /* If we have a SUBREG of a register that we are replacing and we are
2394 replacing it with a MEM, make a new MEM and try replacing the
2395 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2396 or if we would be widening it. */
2398 if (GET_CODE (op) == MEM
2399 && ! mode_dependent_address_p (XEXP (op, 0))
2400 /* Allow splitting of volatile memory references in case we don't
2401 have an instruction to move the whole thing. */
2402 && (! MEM_VOLATILE_P (op)
2403 || (mov_optab->handlers[(int) innermode].insn_code
2404 == CODE_FOR_nothing))
2405 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2406 return adjust_address_nv (op, outermode, byte);
2408 /* Handle complex values represented as CONCAT
2409 of real and imaginary part. */
2410 if (GET_CODE (op) == CONCAT)
2412 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2413 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2414 unsigned int final_offset;
2417 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2418 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2421 /* We can at least simplify it by referring directly to the relevant part. */
2422 return gen_rtx_SUBREG (outermode, part, final_offset);
2427 /* Make a SUBREG operation or equivalent if it folds. */
2430 simplify_gen_subreg (outermode, op, innermode, byte)
2433 enum machine_mode outermode, innermode;
2436 /* Little bit of sanity checking. */
2437 if (innermode == VOIDmode || outermode == VOIDmode
2438 || innermode == BLKmode || outermode == BLKmode)
2441 if (GET_MODE (op) != innermode
2442 && GET_MODE (op) != VOIDmode)
2445 if (byte % GET_MODE_SIZE (outermode)
2446 || byte >= GET_MODE_SIZE (innermode))
2449 new = simplify_subreg (outermode, op, innermode, byte);
2453 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2456 return gen_rtx_SUBREG (outermode, op, byte);
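/* Editorial usage sketch (not from the original sources): taking the low
   SImode word of a DImode constant, e.g.
     simplify_gen_subreg (SImode, GEN_INT (5), DImode, 0)
   folds to (const_int 5) on a little-endian target, while a DImode pseudo
   register in place of the constant would come back as a SUBREG rtx.  */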
2458 /* Simplify X, an rtx expression.
2460 Return the simplified expression or NULL if no simplifications can be made.
2463 This is the preferred entry point into the simplification routines;
2464 however, we still allow passes to call the more specific routines.
2466 Right now GCC has three (yes, three) major bodies of RTL simplification
2467 code that need to be unified.
2469 1. fold_rtx in cse.c. This code uses various CSE specific
2470 information to aid in RTL simplification.
2472 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2473 it uses combine specific information to aid in RTL
2476 3. The routines in this file.
2479 Long term we want to only have one body of simplification code; to
2480 get to that state I recommend the following steps:
2482 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2483 which are not pass dependent state into these routines.
2485 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2486 use this routine whenever possible.
2488 3. Allow for pass dependent state to be provided to these
2489 routines and add simplifications based on the pass dependent
2490 state. Remove code from cse.c & combine.c that becomes
2493 It will take time, but ultimately the compiler will be easier to
2494 maintain and improve. It's totally silly that when we add a
2495 simplification it needs to be added to 4 places (3 for RTL
2496 simplification and 1 for tree simplification). */
2502 enum rtx_code code = GET_CODE (x);
2503 enum machine_mode mode = GET_MODE (x);
2505 switch (GET_RTX_CLASS (code))
2508 return simplify_unary_operation (code, mode,
2509 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2511 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2516 XEXP (x, 0) = XEXP (x, 1);
2518 return simplify_binary_operation (code, mode,
2519 XEXP (x, 0), XEXP (x, 1));
2523 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2527 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2528 XEXP (x, 0), XEXP (x, 1),
2532 return simplify_relational_operation (code,
2533 ((GET_MODE (XEXP (x, 0))
2535 ? GET_MODE (XEXP (x, 0))
2536 : GET_MODE (XEXP (x, 1))),
2537 XEXP (x, 0), XEXP (x, 1));
2539 /* The only case we try to handle is a SUBREG. */
2541 return simplify_gen_subreg (mode, SUBREG_REG (x),
2542 GET_MODE (SUBREG_REG (x)),