/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"

#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in cse.c.  Until then, do not
   change these macros without also changing the copy in cse.c.  */
#define FIXED_BASE_PLUS_P(X)                                    \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx    \
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx                             \
   || (X) == virtual_incoming_args_rtx                          \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx                     \
           || XEXP (X, 0) == hard_frame_pointer_rtx             \
           || (XEXP (X, 0) == arg_pointer_rtx                   \
               && fixed_regs[ARG_POINTER_REGNUM])               \
           || XEXP (X, 0) == virtual_stack_vars_rtx             \
           || XEXP (X, 0) == virtual_incoming_args_rtx))        \
   || GET_CODE (X) == ADDRESSOF)
/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X)                                  \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx    \
   || (X) == virtual_stack_vars_rtx                             \
   || (X) == virtual_incoming_args_rtx                          \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx                     \
           || XEXP (X, 0) == hard_frame_pointer_rtx             \
           || (XEXP (X, 0) == arg_pointer_rtx                   \
               && fixed_regs[ARG_POINTER_REGNUM])               \
           || XEXP (X, 0) == virtual_stack_vars_rtx             \
           || XEXP (X, 0) == virtual_incoming_args_rtx))        \
   || (X) == stack_pointer_rtx                                  \
   || (X) == virtual_stack_dynamic_rtx                          \
   || (X) == virtual_outgoing_args_rtx                          \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx                     \
           || XEXP (X, 0) == virtual_stack_dynamic_rtx          \
           || XEXP (X, 0) == virtual_outgoing_args_rtx))        \
   || GET_CODE (X) == ADDRESSOF)
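
/* Illustrative examples (not part of the original source): both macros
   accept a bare base register or a base-plus-constant form, so
   (plus:SI (reg/f frame_pointer) (const_int 8)) satisfies
   FIXED_BASE_PLUS_P, while stack_pointer_rtx by itself satisfies
   NONZERO_BASE_PLUS_P but not FIXED_BASE_PLUS_P.  */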
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   twos-complement number.  */

#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
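
/* A minimal usage sketch (not part of the original source; the helper
   name is made up).  It shows how the high word of a (low, high) pair
   is derived from a word-sized value.  */
#if 0
static void
hwi_sign_extend_example ()
{
  HOST_WIDE_INT lv = -5;
  HOST_WIDE_INT hv = HWI_SIGN_EXTEND (lv);      /* hv == -1 */

  lv = 5;
  hv = HWI_SIGN_EXTEND (lv);                    /* hv == 0 */
}
#endif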
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
                                        enum machine_mode, rtx, rtx));
static void check_fold_consts PARAMS ((PTR));
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction of CONST_INT specially.  Otherwise,
     just form the operation.  */

  if (code == PLUS && GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, INTVAL (op1));
  else if (code == MINUS && GET_CODE (op1) == CONST_INT
           && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, - INTVAL (op1));

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
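
/* Example (illustrative, not in the original source): if REG is
   (reg:SI 100), simplify_gen_binary (PLUS, SImode, REG, const0_rtx)
   folds to REG via simplify_binary_operation, while
   simplify_gen_binary (PLUS, SImode, REG, GEN_INT (4)) goes through
   plus_constant and yields (plus:SI (reg:SI 100) (const_int 4)).  */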
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
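
/* Example (illustrative, not in the original source): if REG is
   (reg:SI 100), simplify_gen_relational (LT, SImode, SImode,
   const0_rtx, REG) swaps the operands and the condition, producing
   (gt:SI (reg:SI 100) (const_int 0)).  */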
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op = (XEXP (x, 0) == old
                  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

        return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
        simplify_gen_binary (code, mode,
                             simplify_replace_rtx (XEXP (x, 0), old, new),
                             simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      return
        simplify_gen_relational (code, mode,
                                 (GET_MODE (XEXP (x, 0)) != VOIDmode
                                  ? GET_MODE (XEXP (x, 0))
                                  : GET_MODE (XEXP (x, 1))),
                                 simplify_replace_rtx (XEXP (x, 0), old, new),
                                 simplify_replace_rtx (XEXP (x, 1), old, new));

    case '3':
    case 'b':
      return
        simplify_gen_ternary (code, mode, GET_MODE (XEXP (x, 0)),
                              simplify_replace_rtx (XEXP (x, 0), old, new),
                              simplify_replace_rtx (XEXP (x, 1), old, new),
                              simplify_replace_rtx (XEXP (x, 2), old, new));

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          rtx exp;
          exp = simplify_gen_subreg (GET_MODE (x),
                                     simplify_replace_rtx (SUBREG_REG (x),
                                                           old, new),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          if (exp)
            x = exp;
        }
      return x;

    default:
      if (GET_CODE (x) == MEM)
        {
          /* We can't use change_address here, since it verifies the memory
             address for correctness.  We don't want such a check, since we
             may handle addresses previously incorrect (such as ones in push
             instructions), and it is the caller's job to verify that the
             resulting insn matches.  */
          rtx addr = simplify_replace_rtx (XEXP (x, 0), old, new);
          rtx mem;

          if (XEXP (x, 0) != addr)
            {
              mem = gen_rtx_MEM (GET_MODE (x), addr);
              MEM_COPY_ATTRIBUTES (mem, x);
              x = mem;
            }
        }
      return x;
    }
  return x;
}
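
/* Example (illustrative, not in the original source): replacing
   (reg:SI 100) with (const_int 2) in (plus:SI (reg:SI 100) (const_int 3))
   recurses into the operands and then folds, yielding (const_int 5).  */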
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_INT (d, lv, hv, mode);
#else
      if (hv < 0)
        {
          d = (double) (~ hv);
          d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
                * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
          d += (double) (unsigned HOST_WIDE_INT) (~ lv);
          d = (- d - 1.0);
        }
      else
        {
          d = (double) hv;
          d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
                * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
          d += (double) (unsigned HOST_WIDE_INT) lv;
        }
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
#else
      d = (double) (unsigned HOST_WIDE_INT) hv;
      d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
            * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
      d += (double) (unsigned HOST_WIDE_INT) lv;
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
#endif
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      register HOST_WIDE_INT arg0 = INTVAL (op);
      register HOST_WIDE_INT val;

      switch (code)
        {
        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        default:
          return 0;
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
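
  /* Example (illustrative, not in the original source): with a CONST_INT
     operand the switch above folds the operation outright, e.g.
     simplify_unary_operation (NEG, SImode, GEN_INT (9), SImode) yields
     (const_int -9), and FFS of (const_int 0) yields (const_int 0).  */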
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
          else
            lv = exact_log2 (l1 & (-l1)) + 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      rtx x;

      if (setjmp (handler))
        /* There used to be a warning here, but that is inadvisable.
           People may want to cause traps, and the natural way
           to do it should not get a warning.  */
        return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;

        case ABS:
          if (REAL_VALUE_NEGATIVE (d))
            d = REAL_VALUE_NEGATE (d);
          break;

        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;

        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;

        case FIX:
          d = REAL_VALUE_RNDZINT (d);
          break;

        case UNSIGNED_FIX:
          d = REAL_VALUE_UNSIGNED_RNDZINT (d);
          break;

        default:
          abort ();
        }

      x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
      set_float_handler (NULL);
      return x;
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      HOST_WIDE_INT val;

      if (setjmp (handler))
        return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case FIX:
          val = REAL_VALUE_FIX (d);
          break;

        case UNSIGNED_FIX:
          val = REAL_VALUE_UNSIGNED_FIX (d);
          break;

        default:
          abort ();
        }

      set_float_handler (NULL);

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
#endif
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return gen_rtx_fmt_ee (reversed,
                                   op_mode, XEXP (op, 0), XEXP (op, 1));
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);
          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

#ifdef POINTERS_EXTEND_UNSIGNED
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

#ifdef POINTERS_EXTEND_UNSIGNED
        case ZERO_EXTEND:
          if (POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
          break;
#endif

        default:
          break;
        }

      return 0;
    }
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();
738 if (GET_MODE_CLASS (mode) == MODE_FLOAT
739 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
740 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
742 REAL_VALUE_TYPE f0, f1, value;
745 if (setjmp (handler))
748 set_float_handler (handler);
750 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
751 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
752 f0 = real_value_truncate (mode, f0);
753 f1 = real_value_truncate (mode, f1);
755 #ifdef REAL_ARITHMETIC
756 #ifndef REAL_INFINITY
757 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
760 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
774 #ifndef REAL_INFINITY
781 value = MIN (f0, f1);
784 value = MAX (f0, f1);
791 value = real_value_truncate (mode, value);
792 set_float_handler (NULL);
793 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
795 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (op0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
        l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
        l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* .. fall through ...  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:  case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:  case ASHIFTRT:
        case ASHIFT:
        case ROTATE:    case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* In IEEE floating point, x+0 is not the same as x.  Similarly
             for the other optimizations below.  */
          if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
              && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
            break;

          if (op1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && GET_CODE (op1) == CONST_INT
              && INTVAL (op1) == 1)
            return gen_rtx_NEG (mode, XEXP (op0, 0));

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));
          /* See if this is something like X * C + X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
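          /* Example (illustrative, not in the original source):
             (plus:SI (mult:SI X (const_int 4)) X) reaches here with
             coeff0 == 4, coeff1 == 1 and had_mult set, so it becomes
             (mult:SI X (const_int 5)).  Without a pre-existing multiply
             a MULT result is discarded; but for
             (plus:SI (ashift:SI X (const_int 1)) (neg:SI X)) the
             coefficients sum to 1 and the result folds to X itself.  */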
          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
              && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
            return tem;

          break;
        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && op1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }

          break;
        case MINUS:
          /* None of these optimizations can be done for IEEE
             floating point.  */
          if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
              && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
            break;

          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (op0, op1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  */
          if (op0 == CONST0_RTX (mode))
            return gen_rtx_NEG (mode, op1);

          /* (-1 - a) is ~a.  */
          if (op0 == constm1_rtx)
            return gen_rtx_NOT (mode, op1);

          /* Subtracting 0 has no effect.  */
          if (op1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
              && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return plus_constant (op0, - INTVAL (op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                return simplify_gen_binary (AND, mode, op0,
                                            gen_rtx_NOT (mode, XEXP (op1, 1)));
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                return simplify_gen_binary (AND, mode, op0,
                                            gen_rtx_NOT (mode, XEXP (op1, 0)));
            }
          break;
        case MULT:
          if (op1 == constm1_rtx)
            {
              tem = simplify_unary_operation (NEG, mode, op0, mode);

              return tem ? tem : gen_rtx_NEG (mode, op0);
            }

          /* In IEEE floating point, x*0 is not always 0.  */
          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && op1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for nans.
             However, ANSI says we can drop signals,
             so we can do this anyway.  */
          if (op1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (op1) == CONST_INT
              && (val = exact_log2 (INTVAL (op1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

          if (GET_CODE (op1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
            {
              REAL_VALUE_TYPE d;
              jmp_buf handler;
              int op1is2, op1ism1;

              if (setjmp (handler))
                return 0;

              set_float_handler (handler);
              REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
              op1is2 = REAL_VALUES_EQUAL (d, dconst2);
              op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
              set_float_handler (NULL);

              /* x*2 is x+x and x*(-1) is -x */
              if (op1is2 && GET_MODE (op0) == mode)
                return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

              else if (op1ism1 && GET_MODE (op0) == mode)
                return gen_rtx_NEG (mode, op0);
            }
          break;
        case IOR:
          if (op1 == const0_rtx)
            return op0;
          if (GET_CODE (op1) == CONST_INT
              && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
            return op1;
          if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          break;

        case XOR:
          if (op1 == const0_rtx)
            return op0;
          if (GET_CODE (op1) == CONST_INT
              && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
            return gen_rtx_NOT (mode, op0);
          if (op0 == op1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;

        case AND:
          if (op1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (op1) == CONST_INT
              && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
            return op0;
          if (op0 == op1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;
        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (op1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (op1))) > 0)
            return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
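          /* Example (illustrative, not in the original source):
             (udiv:SI X (const_int 8)) becomes
             (lshiftrt:SI X (const_int 3)).  */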

          /* ... fall through ...  */

        case DIV:
          if (op1 == CONST1_RTX (mode))
            return op0;

          /* In IEEE floating point, 0/x is not always 0.  */
          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && op0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (op1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
                   && op1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
#if defined (REAL_ARITHMETIC)
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  return gen_rtx_MULT (mode, op0,
                                       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
#else
                  return
                    gen_rtx_MULT (mode, op0,
                                  CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
#endif
                }
            }
#endif
          break;
        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (op1) == CONST_INT
              && exact_log2 (INTVAL (op1)) > 0)
            return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

          /* ... fall through ...  */

        case MOD:
          if ((op0 == const0_rtx || op1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;
        case ROTATERT:
        case ROTATE:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* ... fall through ...  */

        case ASHIFT:
        case ASHIFTRT:
        case LSHIFTRT:
          if (op1 == const0_rtx)
            return op0;
          if (op0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
              && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
            return op0;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (op1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMIN:
          if (op1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMAX:
          if (op1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
            return op0;
          break;

        default:
          break;
        }

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (op0);
  arg1 = INTVAL (op1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
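
/* Example (illustrative, not in the original source): two CONST_INT
   operands reach the arithmetic switch above, so
   simplify_binary_operation (ASHIFT, SImode, GEN_INT (1), GEN_INT (4))
   computes 1 << 4 and returns (const_int 16).  */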
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

static rtx
simplify_plus_minus (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx ops[8];
  int negs[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
  int first = 1, negate = 0, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
  changed = 1;
  while (changed)
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        switch (GET_CODE (ops[i]))
          {
          case PLUS:
          case MINUS:
            if (n_ops == 7)
              return 0;

            ops[n_ops] = XEXP (ops[i], 1);
            negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
            ops[i] = XEXP (ops[i], 0);
            input_ops++;
            changed = 1;
            break;

          case NEG:
            ops[i] = XEXP (ops[i], 0);
            negs[i] = ! negs[i];
            changed = 1;
            break;

          case CONST:
            ops[i] = XEXP (ops[i], 0);
            input_consts++;
            changed = 1;
            break;

          case NOT:
            /* ~a -> (-a - 1) */
            if (n_ops != 7)
              {
                ops[n_ops] = constm1_rtx;
                negs[n_ops++] = negs[i];
                ops[i] = XEXP (ops[i], 0);
                negs[i] = ! negs[i];
                changed = 1;
              }
            break;

          case CONST_INT:
            if (negs[i])
              ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
            break;

          default:
            break;
          }
    }
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2)
    return 0;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  changed = 1;
  while (changed)
    {
      changed = 0;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          if (ops[i] != 0 && ops[j] != 0
              && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
            {
              rtx lhs = ops[i], rhs = ops[j];
              enum rtx_code ncode = PLUS;

              if (negs[i] && ! negs[j])
                lhs = ops[j], rhs = ops[i], ncode = MINUS;
              else if (! negs[i] && negs[j])
                ncode = MINUS;

              tem = simplify_binary_operation (ncode, mode, lhs, rhs);
              if (tem)
                {
                  ops[i] = tem, ops[j] = 0;
                  negs[i] = negs[i] && negs[j];
                  if (GET_CODE (tem) == NEG)
                    ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];

                  if (GET_CODE (ops[i]) == CONST_INT && negs[i])
                    ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;

                  changed = 1;
                }
            }

      first = 0;
    }
  /* Pack all the operands to the lower-numbered entries and give up if
     we didn't reduce the number of operands we had.  Make sure we
     count a CONST as two operands.  If we have the same number of
     operands, but have made more CONSTs than we had, this is also
     an improvement, so accept it.  */

  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j] != 0)
      {
        ops[i] = ops[j], negs[i++] = negs[j];
        if (GET_CODE (ops[j]) == CONST)
          n_consts++;
      }

  if (i + n_consts > input_ops
      || (i + n_consts == input_ops && n_consts <= input_consts))
    return 0;

  n_ops = i;

  /* If we have a CONST_INT, put it last.  */
  for (i = 0; i < n_ops - 1; i++)
    if (GET_CODE (ops[i]) == CONST_INT)
      {
        tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i], ops[i] = tem;
        j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
      }

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */
  for (i = 0; i < n_ops && negs[i]; i++)
    ;

  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
        negs[i] = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0], ops[0] = ops[i], ops[i] = tem;
      j = negs[0], negs[0] = negs[i], negs[i] = j;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0];
  for (i = 1; i < n_ops; i++)
    result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
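
/* Example (illustrative, not in the original source): for
   (minus:SI (plus:SI X (const_int 3)) (plus:SI X (const_int 1))),
   simplify_plus_minus expands the operand lists, cancels X against X,
   folds the constants, and returns (const_int 2).  */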
struct cfc_args
{
  rtx op0, op1;                 /* Input */
  int equal, op0lt, op1lt;      /* Output */
  int unordered;
};

static void
check_fold_consts (data)
     PTR data;
{
  struct cfc_args *args = (struct cfc_args *) data;
  REAL_VALUE_TYPE d0, d1;

  /* We may possibly raise an exception while reading the value.  */
  args->unordered = 1;
  REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
  REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);

  /* Comparisons of Inf versus Inf are ordered.  */
  if (REAL_VALUE_ISNAN (d0)
      || REAL_VALUE_ISNAN (d1))
    return;
  args->equal = REAL_VALUES_EQUAL (d0, d1);
  args->op0lt = REAL_VALUES_LESS (d0, d1);
  args->op1lt = REAL_VALUES_LESS (d1, d0);
  args->unordered = 0;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
          || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
            && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
                                          mode, tem, const0_rtx);
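
  /* Example (illustrative, not in the original source): comparing
     (eq:SI (plus:SI X (const_int 1)) X) ends up here; X + 1 - X folds
     to (const_int 1), and the recursive comparison against zero yields
     const0_rtx, i.e. the test is known false.  */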
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;
  /* For non-IEEE floating-point, if the two operands are equal, we know the
     result.  */
  if (rtx_equal_p (op0, op1)
      && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
          || ! FLOAT_MODE_P (GET_MODE (op0))
          || flag_unsafe_math_optimizations))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Set up input for check_fold_consts ().  */
      args.op0 = op0;
      args.op1 = op1;

      if (!do_float_handler (check_fold_consts, (PTR) &args))
        /* We got an exception from check_fold_consts ().  */
        return 0;

      if (args.unordered)
        switch (code)
          {
          case UNEQ: case UNLT: case UNGT: case UNLE: case UNGE:
          case NE: case UNORDERED:
            return const_true_rtx;
          case EQ: case LT: case GT: case LE: case GE: case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      /* Receive output from check_fold_consts ().  */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }
#endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
           && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (op0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (op0);
          h0u = h0s = CONST_DOUBLE_HIGH (op0);
        }
      else
        {
          l0u = l0s = INTVAL (op0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (op1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (op1);
          h1u = h1s = CONST_DOUBLE_HIGH (op1);
        }
      else
        {
          l1u = l1s = INTVAL (op1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
        {
        case EQ:
          /* References to the frame plus a constant or labels cannot
             be zero, but a SYMBOL_REF can due to #pragma weak.  */
          if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
               || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
              /* On some machines, the arg pointer reg can sometimes be 0.  */
              && op0 != arg_pointer_rtx
#endif
              )
            return const0_rtx;
          break;

        case NE:
          if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
               || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
              && op0 != arg_pointer_rtx
#endif
              )
            return const_true_rtx;
          break;

        case GEU:
          /* Unsigned values are never negative.  */
          if (op1 == const0_rtx)
            return const_true_rtx;
          break;

        case LTU:
          if (op1 == const0_rtx)
            return const0_rtx;
          break;

        case LEU:
          /* Unsigned values are never greater than the largest
             representable value of their mode.  */
          if (GET_CODE (op1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const_true_rtx;
          break;

        case GTU:
          if (GET_CODE (op1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const0_rtx;
          break;

        default:
          break;
        }

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return GEN_INT (val);
        }
      break;
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && rtx_equal_p (XEXP (op0, 0), op1)
          && rtx_equal_p (XEXP (op0, 1), op2))
        return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && rtx_equal_p (XEXP (op0, 1), op1)
          && rtx_equal_p (XEXP (op0, 0), op2))
        return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;
          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
                                                XEXP (op0, 0), XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp == const0_rtx)
            return op2;
          else if (temp == const1_rtx)
            return op1;
          else if (temp)
            op0 = temp;

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
            }
        }
      break;

    default:
      abort ();
    }

  return 0;
}
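
/* Example (illustrative, not in the original source): when
   STORE_FLAG_VALUE is 1, simplify_ternary_operation rewrites
   (if_then_else:SI (lt X Y) (const_int 1) (const_int 0)) as
   (lt:SI X Y).  */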
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */

rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;
  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val;

      /* ??? This code is partly redundant with code below, but can handle
         the subregs of floats and similar corner cases.
         Later we should move all simplification code here and rewrite
         GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
         using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte)
        {
          rtx new = gen_lowpart_if_possible (outermode, op);
          if (new)
            return new;
        }

      /* Similar comment as above apply here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
          && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
          && GET_MODE_CLASS (outermode) == MODE_INT)
        {
          rtx new = constant_subword (op,
                                      (byte / UNITS_PER_WORD),
                                      innermode);
          if (new)
            return new;
        }

      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
        {
        case CONST_DOUBLE:
          if (GET_MODE (op) != VOIDmode)
            break;

          /* We can't handle this case yet.  */
          if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
            return 0;

          part = offset >= HOST_BITS_PER_WIDE_INT;
          if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
               && BYTES_BIG_ENDIAN)
              || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
                  && WORDS_BIG_ENDIAN))
            part = !part;
          val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
          offset %= HOST_BITS_PER_WIDE_INT;

          /* We've already picked the word we want from a double, so
             pretend this is actually an integer.  */
          innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

          /* FALLTHROUGH */
        case CONST_INT:
          if (GET_CODE (op) == CONST_INT)
            val = INTVAL (op);

          /* We don't handle synthesizing of non-integral constants yet.  */
          if (GET_MODE_CLASS (outermode) != MODE_INT)
            return 0;

          if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
            {
              if (WORDS_BIG_ENDIAN)
                offset = (GET_MODE_BITSIZE (innermode)
                          - GET_MODE_BITSIZE (outermode) - offset);
              if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
                  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
                offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
                          - 2 * (offset % BITS_PER_WORD));
            }

          if (offset >= HOST_BITS_PER_WIDE_INT)
            return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
          else
            {
              val >>= offset;
              if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
                val = trunc_int_for_mode (val, outermode);
              return GEN_INT (val);
            }

        default:
          break;
        }
    }
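
  /* Example (illustrative, not in the original source):
     simplify_subreg (QImode, GEN_INT (0x1234), SImode,
     subreg_lowpart_offset (QImode, SImode)) extracts the low byte and
     returns (const_int 0x34).  */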
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  The irritating exception is a paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return 0;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || final_offset >= GET_MODE_SIZE (innermostmode))
            return 0;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return 0;
        }

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
                             GET_MODE (SUBREG_REG (op)),
                             final_offset);
      if (new)
        return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
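
  /* Example (illustrative, not in the original source): a QImode subreg
     of an HImode subreg of (reg:SI 100) collapses here into a single
     QImode subreg of the SImode register, and a subreg that changes the
     mode back to the starting mode at offset 0 simply returns
     (reg:SI 100).  */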
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
          || ! rtx_equal_function_value_matters)
#ifdef CLASS_CANNOT_CHANGE_MODE
      && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
            && (TEST_HARD_REG_BIT
                (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
                 REGNO (op))))
#endif
      && REGNO (op) < FIRST_PSEUDO_REGISTER
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
              ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
                                           0);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit Sparc and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
        return gen_rtx_REG (outermode, final_regno);
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || (mov_optab->handlers[(int) innermode].insn_code
              == CODE_FOR_nothing))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    {
      rtx new;

      new = gen_rtx_MEM (outermode, plus_constant (XEXP (op, 0), byte));
      MEM_COPY_ATTRIBUTES (new, op);
      return new;
    }

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      return simplify_subreg (outermode, part, GET_MODE (part), final_offset);
    }

  return 0;
}
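
/* Example (illustrative, not in the original source): for a complex value
   (concat:SC A B), the CONCAT case above selects the real part at byte 0,
   so simplify_subreg (SFmode, OP, SCmode, 0) returns A.  */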
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added in 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case '2':
    case 'c':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      return 0;

    default:
      return 0;
    }
}