1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
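/* For example, with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND of a low word
   with bit 63 set yields (HOST_WIDE_INT) -1, while HWI_SIGN_EXTEND (5)
   yields 0 -- exactly the high half of the sign-extended (low, high) pair. */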
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx, int);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode, unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode, rtx, rtx);
61 /* Negate a CONST_INT rtx, truncating (because a conversion from a
62 maximally negative number can overflow). */
64 neg_const_int (enum machine_mode mode, rtx i)
66 return gen_int_mode (- INTVAL (i), mode);
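/* E.g., with a 64-bit HOST_WIDE_INT, negating the SImode constant
   -0x80000000 gives 0x80000000, which does not fit in SImode;
   gen_int_mode truncates it back to -0x80000000 instead of overflowing. */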
70 /* Make a binary operation by properly ordering the operands and
71 seeing if the expression folds. */
74 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
79 /* Put complex operands first and constants second if commutative. */
80 if (GET_RTX_CLASS (code) == 'c'
81 && swap_commutative_operands_p (op0, op1))
82 tem = op0, op0 = op1, op1 = tem;
84 /* If this simplifies, do it. */
85 tem = simplify_binary_operation (code, mode, op0, op1);
89 /* Handle addition and subtraction specially. Otherwise, just form the operation. */
92 if (code == PLUS || code == MINUS)
94 tem = simplify_plus_minus (code, mode, op0, op1, 1);
99 return gen_rtx_fmt_ee (code, mode, op0, op1);
102 /* If X is a MEM referencing the constant pool, return the real value.
103 Otherwise return X. */
105 avoid_constant_pool_reference (rtx x)
108 enum machine_mode cmode;
110 switch (GET_CODE (x))
116 /* Handle float extensions of constant pool references. */
118 c = avoid_constant_pool_reference (tmp);
119 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
123 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
124 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
134 /* Call target hook to avoid the effects of -fpic etc. */
135 addr = (*targetm.delegitimize_address) (addr);
137 if (GET_CODE (addr) == LO_SUM)
138 addr = XEXP (addr, 1);
140 if (GET_CODE (addr) != SYMBOL_REF
141 || ! CONSTANT_POOL_ADDRESS_P (addr))
144 c = get_pool_constant (addr);
145 cmode = get_pool_mode (addr);
147 /* If we're accessing the constant in a different mode than it was
148 originally stored, attempt to fix that up via subreg simplifications.
149 If that fails we have no choice but to return the original memory. */
150 if (cmode != GET_MODE (x))
152 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
159 /* Make a unary operation by first seeing if it folds and otherwise making
160 the specified operation. */
163 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
164 enum machine_mode op_mode)
168 /* If this simplifies, use it. */
169 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
172 return gen_rtx_fmt_e (code, mode, op);
175 /* Likewise for ternary operations. */
178 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
179 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
183 /* If this simplifies, use it. */
184 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
188 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
191 /* Likewise, for relational operations.
192 CMP_MODE specifies the mode in which the comparison is done. */
196 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
197 enum machine_mode cmp_mode, rtx op0, rtx op1)
201 if (cmp_mode == VOIDmode)
202 cmp_mode = GET_MODE (op0);
203 if (cmp_mode == VOIDmode)
204 cmp_mode = GET_MODE (op1);
206 if (cmp_mode != VOIDmode)
208 tem = simplify_relational_operation (code, cmp_mode, op0, op1);
212 #ifdef FLOAT_STORE_FLAG_VALUE
213 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
216 if (tem == const0_rtx)
217 return CONST0_RTX (mode);
218 if (tem != const_true_rtx)
220 val = FLOAT_STORE_FLAG_VALUE (mode);
221 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
228 /* For the following tests, ensure const0_rtx is op1. */
229 if (swap_commutative_operands_p (op0, op1)
230 || (op0 == const0_rtx && op1 != const0_rtx))
231 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
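/* E.g., (gt (const_int 0) x) becomes (lt x (const_int 0)) here. */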
233 /* If op0 is a compare, extract the comparison arguments from it. */
234 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
235 return simplify_gen_relational (code, mode, VOIDmode,
236 XEXP (op0, 0), XEXP (op0, 1));
238 /* If op0 is a comparison, extract the comparison arguments from it. */
239 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
243 if (GET_MODE (op0) == mode)
245 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
246 XEXP (op0, 0), XEXP (op0, 1));
250 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
252 return simplify_gen_relational (new, mode, VOIDmode,
253 XEXP (op0, 0), XEXP (op0, 1));
257 return gen_rtx_fmt_ee (code, mode, op0, op1);
260 /* Replace all occurrences of OLD in X with NEW and try to simplify the
261 resulting RTX. Return a new RTX which is as simplified as possible. */
264 simplify_replace_rtx (rtx x, rtx old, rtx new)
266 enum rtx_code code = GET_CODE (x);
267 enum machine_mode mode = GET_MODE (x);
268 enum machine_mode op_mode;
271 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
272 to build a new expression substituting recursively. If we can't do
273 anything, return our input. */
278 switch (GET_RTX_CLASS (code))
282 op_mode = GET_MODE (op0);
283 op0 = simplify_replace_rtx (op0, old, new);
284 if (op0 == XEXP (x, 0))
286 return simplify_gen_unary (code, mode, op0, op_mode);
290 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
291 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
292 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
294 return simplify_gen_binary (code, mode, op0, op1);
299 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
300 op0 = simplify_replace_rtx (op0, old, new);
301 op1 = simplify_replace_rtx (op1, old, new);
302 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
304 return simplify_gen_relational (code, mode, op_mode, op0, op1);
309 op_mode = GET_MODE (op0);
310 op0 = simplify_replace_rtx (op0, old, new);
311 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
312 op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
313 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
315 if (op_mode == VOIDmode)
316 op_mode = GET_MODE (op0);
317 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
320 /* The only case we try to handle is a SUBREG. */
323 op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
324 if (op0 == SUBREG_REG (x))
326 op0 = simplify_gen_subreg (GET_MODE (x), op0,
327 GET_MODE (SUBREG_REG (x)),
329 return op0 ? op0 : x;
336 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
337 if (op0 == XEXP (x, 0))
339 return replace_equiv_address_nv (x, op0);
341 else if (code == LO_SUM)
343 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
344 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
346 /* (lo_sum (high x) x) -> x */
347 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
350 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
352 return gen_rtx_LO_SUM (mode, op0, op1);
354 else if (code == REG)
356 if (REG_P (old) && REGNO (x) == REGNO (old))
367 /* Try to simplify a unary operation CODE whose output mode is to be
368 MODE with input operand OP whose mode was originally OP_MODE.
369 Return zero if no simplification can be made. */
371 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
372 rtx op, enum machine_mode op_mode)
374 unsigned int width = GET_MODE_BITSIZE (mode);
375 rtx trueop = avoid_constant_pool_reference (op);
377 if (code == VEC_DUPLICATE)
379 if (!VECTOR_MODE_P (mode))
381 if (GET_MODE (trueop) != VOIDmode
382 && !VECTOR_MODE_P (GET_MODE (trueop))
383 && GET_MODE_INNER (mode) != GET_MODE (trueop))
385 if (GET_MODE (trueop) != VOIDmode
386 && VECTOR_MODE_P (GET_MODE (trueop))
387 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
389 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
390 || GET_CODE (trueop) == CONST_VECTOR)
392 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
393 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
394 rtvec v = rtvec_alloc (n_elts);
397 if (GET_CODE (trueop) != CONST_VECTOR)
398 for (i = 0; i < n_elts; i++)
399 RTVEC_ELT (v, i) = trueop;
402 enum machine_mode inmode = GET_MODE (trueop);
403 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
404 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
406 if (in_n_elts >= n_elts || n_elts % in_n_elts)
408 for (i = 0; i < n_elts; i++)
409 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
411 return gen_rtx_CONST_VECTOR (mode, v);
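/* E.g., (vec_duplicate:V4SI (const_int 1)) folds here to
   (const_vector:V4SI [1 1 1 1]). */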
414 else if (GET_CODE (op) == CONST)
415 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
417 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
419 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
420 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
421 enum machine_mode opmode = GET_MODE (trueop);
422 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
423 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
424 rtvec v = rtvec_alloc (n_elts);
427 if (op_n_elts != n_elts)
430 for (i = 0; i < n_elts; i++)
432 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
433 CONST_VECTOR_ELT (trueop, i),
434 GET_MODE_INNER (opmode));
437 RTVEC_ELT (v, i) = x;
439 return gen_rtx_CONST_VECTOR (mode, v);
442 /* The order of these tests is critical so that, for example, we don't
443 check the wrong mode (input vs. output) for a conversion operation,
444 such as FIX. At some point, this should be simplified. */
446 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
447 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
449 HOST_WIDE_INT hv, lv;
452 if (GET_CODE (trueop) == CONST_INT)
453 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
455 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
457 REAL_VALUE_FROM_INT (d, lv, hv, mode);
458 d = real_value_truncate (mode, d);
459 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
461 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
462 && (GET_CODE (trueop) == CONST_DOUBLE
463 || GET_CODE (trueop) == CONST_INT))
465 HOST_WIDE_INT hv, lv;
468 if (GET_CODE (trueop) == CONST_INT)
469 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
471 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
473 if (op_mode == VOIDmode)
475 /* We don't know how to interpret negative-looking numbers in
476 this case, so don't try to fold those. */
480 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
483 hv = 0, lv &= GET_MODE_MASK (op_mode);
485 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
486 d = real_value_truncate (mode, d);
487 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
490 if (GET_CODE (trueop) == CONST_INT
491 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
493 HOST_WIDE_INT arg0 = INTVAL (trueop);
507 val = (arg0 >= 0 ? arg0 : - arg0);
511 /* Don't use ffs here. Instead, isolate the low-order bit and then
512 compute its 1-based position. If arg0 is zero, this will return 0, as desired. */
513 arg0 &= GET_MODE_MASK (mode);
514 val = exact_log2 (arg0 & (- arg0)) + 1;
518 arg0 &= GET_MODE_MASK (mode);
519 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
522 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
526 arg0 &= GET_MODE_MASK (mode);
529 /* Even if the value at zero is undefined, we have to come
530 up with some replacement. Seems good enough. */
531 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
532 val = GET_MODE_BITSIZE (mode);
535 val = exact_log2 (arg0 & -arg0);
539 arg0 &= GET_MODE_MASK (mode);
542 val++, arg0 &= arg0 - 1;
546 arg0 &= GET_MODE_MASK (mode);
549 val++, arg0 &= arg0 - 1;
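/* In both loops above, `arg0 &= arg0 - 1' clears the lowest set bit on
   each iteration, so the loop body runs once per set bit (Kernighan's
   bit-counting method). */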
558 /* When zero-extending a CONST_INT, we need to know its original mode. */
560 if (op_mode == VOIDmode)
562 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
564 /* If we were really extending the mode,
565 we would have to distinguish between zero-extension
566 and sign-extension. */
567 if (width != GET_MODE_BITSIZE (op_mode))
571 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
572 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
578 if (op_mode == VOIDmode)
580 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
582 /* If we were really extending the mode,
583 we would have to distinguish between zero-extension
584 and sign-extension. */
585 if (width != GET_MODE_BITSIZE (op_mode))
589 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
592 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
594 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
595 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
612 val = trunc_int_for_mode (val, mode);
614 return GEN_INT (val);
617 /* We can do some operations on integer CONST_DOUBLEs. Also allow
618 for a DImode operation on a CONST_INT. */
619 else if (GET_MODE (trueop) == VOIDmode
620 && width <= HOST_BITS_PER_WIDE_INT * 2
621 && (GET_CODE (trueop) == CONST_DOUBLE
622 || GET_CODE (trueop) == CONST_INT))
624 unsigned HOST_WIDE_INT l1, lv;
625 HOST_WIDE_INT h1, hv;
627 if (GET_CODE (trueop) == CONST_DOUBLE)
628 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
630 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
640 neg_double (l1, h1, &lv, &hv);
645 neg_double (l1, h1, &lv, &hv);
657 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
660 lv = exact_log2 (l1 & -l1) + 1;
666 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
667 - HOST_BITS_PER_WIDE_INT;
669 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
670 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
671 lv = GET_MODE_BITSIZE (mode);
677 lv = exact_log2 (l1 & -l1);
679 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
680 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
681 lv = GET_MODE_BITSIZE (mode);
704 /* This is just a change-of-mode, so do nothing. */
709 if (op_mode == VOIDmode)
712 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
716 lv = l1 & GET_MODE_MASK (op_mode);
720 if (op_mode == VOIDmode
721 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
725 lv = l1 & GET_MODE_MASK (op_mode);
726 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
727 && (lv & ((HOST_WIDE_INT) 1
728 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
729 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
731 hv = HWI_SIGN_EXTEND (lv);
742 return immed_double_const (lv, hv, mode);
745 else if (GET_CODE (trueop) == CONST_DOUBLE
746 && GET_MODE_CLASS (mode) == MODE_FLOAT)
748 REAL_VALUE_TYPE d, t;
749 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
754 if (HONOR_SNANS (mode) && real_isnan (&d))
756 real_sqrt (&t, mode, &d);
760 d = REAL_VALUE_ABS (d);
763 d = REAL_VALUE_NEGATE (d);
766 d = real_value_truncate (mode, d);
769 /* All this does is change the mode. */
772 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
779 real_to_target (tmp, &d, GET_MODE (trueop));
780 for (i = 0; i < 4; i++)
782 real_from_target (&d, tmp, mode);
787 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
790 else if (GET_CODE (trueop) == CONST_DOUBLE
791 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
792 && GET_MODE_CLASS (mode) == MODE_INT
793 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
795 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
796 operators are intentionally left unspecified (to ease implementation
797 by target backends), for consistency, this routine implements the
798 same semantics for constant folding as used by the middle-end. */
800 HOST_WIDE_INT xh, xl, th, tl;
801 REAL_VALUE_TYPE x, t;
802 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
806 if (REAL_VALUE_ISNAN (x))
809 /* Test against the signed upper bound. */
810 if (width > HOST_BITS_PER_WIDE_INT)
812 th = ((unsigned HOST_WIDE_INT) 1
813 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
819 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
821 real_from_integer (&t, VOIDmode, tl, th, 0);
822 if (REAL_VALUES_LESS (t, x))
829 /* Test against the signed lower bound. */
830 if (width > HOST_BITS_PER_WIDE_INT)
832 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
838 tl = (HOST_WIDE_INT) -1 << (width - 1);
840 real_from_integer (&t, VOIDmode, tl, th, 0);
841 if (REAL_VALUES_LESS (x, t))
847 REAL_VALUE_TO_INT (&xl, &xh, x);
851 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
854 /* Test against the unsigned upper bound. */
855 if (width == 2*HOST_BITS_PER_WIDE_INT)
860 else if (width >= HOST_BITS_PER_WIDE_INT)
862 th = ((unsigned HOST_WIDE_INT) 1
863 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
869 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
871 real_from_integer (&t, VOIDmode, tl, th, 1);
872 if (REAL_VALUES_LESS (t, x))
879 REAL_VALUE_TO_INT (&xl, &xh, x);
885 return immed_double_const (xl, xh, mode);
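/* For example, with width == 32 and a 64-bit HOST_WIDE_INT, the bounds
   tested above are -0x80000000 and 0x7fffffff for FIX, and 0xffffffff
   for UNSIGNED_FIX. */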
888 /* This was formerly used only for non-IEEE float.
889 eggert@twinsun.com says it is safe for IEEE also. */
892 enum rtx_code reversed;
895 /* There are some simplifications we can do even if the operands aren't constant. */
900 /* (not (not X)) == X. */
901 if (GET_CODE (op) == NOT)
904 /* (not (eq X Y)) == (ne X Y), etc. */
905 if (GET_RTX_CLASS (GET_CODE (op)) == '<'
906 && (mode == BImode || STORE_FLAG_VALUE == -1)
907 && ((reversed = reversed_comparison_code (op, NULL_RTX))
909 return simplify_gen_relational (reversed, mode, VOIDmode,
910 XEXP (op, 0), XEXP (op, 1));
912 /* (not (plus X -1)) can become (neg X). */
913 if (GET_CODE (op) == PLUS
914 && XEXP (op, 1) == constm1_rtx)
915 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
917 /* Similarly, (not (neg X)) is (plus X -1). */
918 if (GET_CODE (op) == NEG)
919 return plus_constant (XEXP (op, 0), -1);
921 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
922 if (GET_CODE (op) == XOR
923 && GET_CODE (XEXP (op, 1)) == CONST_INT
924 && (temp = simplify_unary_operation (NOT, mode,
927 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
930 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
931 operands other than 1, but that is not valid. We could do a
932 similar simplification for (not (lshiftrt C X)) where C is
933 just the sign bit, but this doesn't seem common enough to bother with. */
935 if (GET_CODE (op) == ASHIFT
936 && XEXP (op, 0) == const1_rtx)
938 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
939 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
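/* E.g., in QImode, (not (ashift 1 X)) becomes (rotate (const_int -2) X),
   since ~1 is -2. */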
942 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
943 by reversing the comparison code if valid. */
944 if (STORE_FLAG_VALUE == -1
945 && GET_RTX_CLASS (GET_CODE (op)) == '<'
946 && (reversed = reversed_comparison_code (op, NULL_RTX))
948 return simplify_gen_relational (reversed, mode, VOIDmode,
949 XEXP (op, 0), XEXP (op, 1));
951 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
952 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
953 so we can perform the above simplification. */
955 if (STORE_FLAG_VALUE == -1
956 && GET_CODE (op) == ASHIFTRT
957 && GET_CODE (XEXP (op, 1)) == CONST_INT
958 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
959 return simplify_gen_relational (GE, mode, VOIDmode,
960 XEXP (op, 0), const0_rtx);
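/* E.g., for a 32-bit mode, (ashiftrt X 31) is -1 when X is negative and
   0 otherwise, so its NOT is -1 exactly when X >= 0 -- which is
   (ge X (const_int 0)) when STORE_FLAG_VALUE is -1. */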
965 /* (neg (neg X)) == X. */
966 if (GET_CODE (op) == NEG)
969 /* (neg (plus X 1)) can become (not X). */
970 if (GET_CODE (op) == PLUS
971 && XEXP (op, 1) == const1_rtx)
972 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
974 /* Similarly, (neg (not X)) is (plus X 1). */
975 if (GET_CODE (op) == NOT)
976 return plus_constant (XEXP (op, 0), 1);
978 /* (neg (minus X Y)) can become (minus Y X). This transformation
979 isn't safe for modes with signed zeros, since if X and Y are
980 both +0, (minus Y X) is the same as (minus X Y). If the
981 rounding mode is towards +infinity (or -infinity) then the two
982 expressions will be rounded differently. */
983 if (GET_CODE (op) == MINUS
984 && !HONOR_SIGNED_ZEROS (mode)
985 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
986 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
989 if (GET_CODE (op) == PLUS
990 && !HONOR_SIGNED_ZEROS (mode)
991 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
993 /* (neg (plus A C)) is simplified to (minus -C A). */
994 if (GET_CODE (XEXP (op, 1)) == CONST_INT
995 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
997 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
1000 return simplify_gen_binary (MINUS, mode, temp,
1004 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1005 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1006 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1009 /* (neg (mult A B)) becomes (mult (neg A) B).
1010 This works even for floating-point values. */
1011 if (GET_CODE (op) == MULT
1012 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1014 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1015 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1018 /* NEG commutes with ASHIFT since it is multiplication. Only do
1019 this if we can then eliminate the NEG (e.g., if the operand is a constant). */
1021 if (GET_CODE (op) == ASHIFT)
1023 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1026 return simplify_gen_binary (ASHIFT, mode, temp,
1033 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1034 becomes just the MINUS if its mode is MODE. This allows
1035 folding switch statements on machines using casesi (such as the VAX). */
1037 if (GET_CODE (op) == TRUNCATE
1038 && GET_MODE (XEXP (op, 0)) == mode
1039 && GET_CODE (XEXP (op, 0)) == MINUS
1040 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1041 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1042 return XEXP (op, 0);
1044 /* Check for a sign extension of a subreg of a promoted
1045 variable, where the promotion is sign-extended, and the
1046 target mode is the same as the variable's promotion. */
1047 if (GET_CODE (op) == SUBREG
1048 && SUBREG_PROMOTED_VAR_P (op)
1049 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1050 && GET_MODE (XEXP (op, 0)) == mode)
1051 return XEXP (op, 0);
1053 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1054 if (! POINTERS_EXTEND_UNSIGNED
1055 && mode == Pmode && GET_MODE (op) == ptr_mode
1057 || (GET_CODE (op) == SUBREG
1058 && GET_CODE (SUBREG_REG (op)) == REG
1059 && REG_POINTER (SUBREG_REG (op))
1060 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1061 return convert_memory_address (Pmode, op);
1066 /* Check for a zero extension of a subreg of a promoted
1067 variable, where the promotion is zero-extended, and the
1068 target mode is the same as the variable's promotion. */
1069 if (GET_CODE (op) == SUBREG
1070 && SUBREG_PROMOTED_VAR_P (op)
1071 && SUBREG_PROMOTED_UNSIGNED_P (op)
1072 && GET_MODE (XEXP (op, 0)) == mode)
1073 return XEXP (op, 0);
1075 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1076 if (POINTERS_EXTEND_UNSIGNED > 0
1077 && mode == Pmode && GET_MODE (op) == ptr_mode
1079 || (GET_CODE (op) == SUBREG
1080 && GET_CODE (SUBREG_REG (op)) == REG
1081 && REG_POINTER (SUBREG_REG (op))
1082 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1083 return convert_memory_address (Pmode, op);
1095 /* Subroutine of simplify_binary_operation to simplify a commutative,
1096 associative binary operation CODE with result mode MODE, operating
1097 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1098 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1099 canonicalization is possible. */
1102 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1107 /* Linearize the operator to the left. */
1108 if (GET_CODE (op1) == code)
1110 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1111 if (GET_CODE (op0) == code)
1113 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1114 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1117 /* "a op (b op c)" becomes "(b op c) op a". */
1118 if (! swap_commutative_operands_p (op1, op0))
1119 return simplify_gen_binary (code, mode, op1, op0);
1126 if (GET_CODE (op0) == code)
1128 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1129 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1131 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1132 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1135 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1136 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1137 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1138 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1140 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1142 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1143 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1144 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1145 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1147 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1153 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1154 and OP1. Return 0 if no simplification is possible.
1156 Don't use this for relational operations such as EQ or LT.
1157 Use simplify_relational_operation instead. */
1159 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1162 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1164 unsigned int width = GET_MODE_BITSIZE (mode);
1165 rtx trueop0, trueop1;
1168 /* Relational operations don't work here. We must know the mode
1169 of the operands in order to do the comparison correctly.
1170 Assuming a full word can give incorrect results.
1171 Consider comparing 128 with -128 in QImode. */
1173 if (GET_RTX_CLASS (code) == '<')
1176 /* Make sure the constant is second. */
1177 if (GET_RTX_CLASS (code) == 'c'
1178 && swap_commutative_operands_p (op0, op1))
1180 tem = op0, op0 = op1, op1 = tem;
1183 trueop0 = avoid_constant_pool_reference (op0);
1184 trueop1 = avoid_constant_pool_reference (op1);
1186 if (VECTOR_MODE_P (mode)
1187 && GET_CODE (trueop0) == CONST_VECTOR
1188 && GET_CODE (trueop1) == CONST_VECTOR)
1190 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1191 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1192 enum machine_mode op0mode = GET_MODE (trueop0);
1193 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1194 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1195 enum machine_mode op1mode = GET_MODE (trueop1);
1196 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1197 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1198 rtvec v = rtvec_alloc (n_elts);
1201 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1204 for (i = 0; i < n_elts; i++)
1206 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1207 CONST_VECTOR_ELT (trueop0, i),
1208 CONST_VECTOR_ELT (trueop1, i));
1211 RTVEC_ELT (v, i) = x;
1214 return gen_rtx_CONST_VECTOR (mode, v);
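/* E.g., adding the V2SI constant vectors [1 2] and [3 4] folds element
   by element to (const_vector:V2SI [4 6]). */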
1217 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1218 && GET_CODE (trueop0) == CONST_DOUBLE
1219 && GET_CODE (trueop1) == CONST_DOUBLE
1220 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1231 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1233 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1235 for (i = 0; i < 4; i++)
1239 else if (code == IOR)
1241 else if (code == XOR)
1246 real_from_target (&r, tmp0, mode);
1247 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1251 REAL_VALUE_TYPE f0, f1, value;
1253 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1254 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1255 f0 = real_value_truncate (mode, f0);
1256 f1 = real_value_truncate (mode, f1);
1258 if (HONOR_SNANS (mode)
1259 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1263 && REAL_VALUES_EQUAL (f1, dconst0)
1264 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1267 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1269 value = real_value_truncate (mode, value);
1270 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1274 /* We can fold some multi-word operations. */
1275 if (GET_MODE_CLASS (mode) == MODE_INT
1276 && width == HOST_BITS_PER_WIDE_INT * 2
1277 && (GET_CODE (trueop0) == CONST_DOUBLE
1278 || GET_CODE (trueop0) == CONST_INT)
1279 && (GET_CODE (trueop1) == CONST_DOUBLE
1280 || GET_CODE (trueop1) == CONST_INT))
1282 unsigned HOST_WIDE_INT l1, l2, lv;
1283 HOST_WIDE_INT h1, h2, hv;
1285 if (GET_CODE (trueop0) == CONST_DOUBLE)
1286 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1288 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1290 if (GET_CODE (trueop1) == CONST_DOUBLE)
1291 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1293 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
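/* At this point (l1, h1) and (l2, h2) hold the operands as double-word
   values; e.g., with a 32-bit HOST_WIDE_INT this is how DImode constants
   get folded. */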
1298 /* A - B == A + (-B). */
1299 neg_double (l2, h2, &lv, &hv);
1302 /* Fall through.... */
1305 add_double (l1, h1, l2, h2, &lv, &hv);
1309 mul_double (l1, h1, l2, h2, &lv, &hv);
1312 case DIV: case MOD: case UDIV: case UMOD:
1313 /* We'd need to include tree.h to do this and it doesn't seem worth the trouble. */
1318 lv = l1 & l2, hv = h1 & h2;
1322 lv = l1 | l2, hv = h1 | h2;
1326 lv = l1 ^ l2, hv = h1 ^ h2;
1332 && ((unsigned HOST_WIDE_INT) l1
1333 < (unsigned HOST_WIDE_INT) l2)))
1342 && ((unsigned HOST_WIDE_INT) l1
1343 > (unsigned HOST_WIDE_INT) l2)))
1350 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1352 && ((unsigned HOST_WIDE_INT) l1
1353 < (unsigned HOST_WIDE_INT) l2)))
1360 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1362 && ((unsigned HOST_WIDE_INT) l1
1363 > (unsigned HOST_WIDE_INT) l2)))
1369 case LSHIFTRT: case ASHIFTRT:
1371 case ROTATE: case ROTATERT:
1372 if (SHIFT_COUNT_TRUNCATED)
1373 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1375 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1378 if (code == LSHIFTRT || code == ASHIFTRT)
1379 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1381 else if (code == ASHIFT)
1382 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1383 else if (code == ROTATE)
1384 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1385 else /* code == ROTATERT */
1386 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1393 return immed_double_const (lv, hv, mode);
1396 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1397 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1399 /* Even if we can't compute a constant result,
1400 there are some cases worth simplifying. */
1405 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1406 when x is NaN, infinite, or finite and nonzero. They aren't
1407 when x is -0 and the rounding mode is not towards -infinity,
1408 since (-0) + 0 is then 0. */
1409 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1412 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1413 transformations are safe even for IEEE. */
1414 if (GET_CODE (op0) == NEG)
1415 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1416 else if (GET_CODE (op1) == NEG)
1417 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1419 /* (~a) + 1 -> -a */
1420 if (INTEGRAL_MODE_P (mode)
1421 && GET_CODE (op0) == NOT
1422 && trueop1 == const1_rtx)
1423 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1425 /* Handle both-operands-constant cases. We can only add
1426 CONST_INTs to constants since the sum of relocatable symbols
1427 can't be handled by most assemblers. Don't add CONST_INT
1428 to CONST_INT since overflow won't be computed properly if wider
1429 than HOST_BITS_PER_WIDE_INT. */
1431 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1432 && GET_CODE (op1) == CONST_INT)
1433 return plus_constant (op0, INTVAL (op1));
1434 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1435 && GET_CODE (op0) == CONST_INT)
1436 return plus_constant (op1, INTVAL (op0));
1438 /* See if this is something like X * C + X or vice versa or
1439 if the multiplication is written as a shift. If so, we can
1440 distribute and make a new multiply or shift (for instance,
1441 X * 3 + X becomes X * 4). But don't make a
1442 real multiply if we didn't have one before. */
1444 if (! FLOAT_MODE_P (mode))
1446 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1447 rtx lhs = op0, rhs = op1;
1450 if (GET_CODE (lhs) == NEG)
1451 coeff0 = -1, lhs = XEXP (lhs, 0);
1452 else if (GET_CODE (lhs) == MULT
1453 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1455 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1458 else if (GET_CODE (lhs) == ASHIFT
1459 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1460 && INTVAL (XEXP (lhs, 1)) >= 0
1461 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1463 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1464 lhs = XEXP (lhs, 0);
1467 if (GET_CODE (rhs) == NEG)
1468 coeff1 = -1, rhs = XEXP (rhs, 0);
1469 else if (GET_CODE (rhs) == MULT
1470 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1472 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1475 else if (GET_CODE (rhs) == ASHIFT
1476 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1477 && INTVAL (XEXP (rhs, 1)) >= 0
1478 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1480 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1481 rhs = XEXP (rhs, 0);
1484 if (rtx_equal_p (lhs, rhs))
1486 tem = simplify_gen_binary (MULT, mode, lhs,
1487 GEN_INT (coeff0 + coeff1));
1488 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
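/* E.g., (plus (mult X 3) X) reaches here with coeff0 == 3 and
   coeff1 == 1 and folds to (mult X 4). */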
1492 /* If one of the operands is a PLUS or a MINUS, see if we can
1493 simplify this by the associative law.
1494 Don't use the associative law for floating point.
1495 The inaccuracy makes it nonassociative,
1496 and subtle programs can break if operations are associated. */
1498 if (INTEGRAL_MODE_P (mode)
1499 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1500 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1501 || (GET_CODE (op0) == CONST
1502 && GET_CODE (XEXP (op0, 0)) == PLUS)
1503 || (GET_CODE (op1) == CONST
1504 && GET_CODE (XEXP (op1, 0)) == PLUS))
1505 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1508 /* Reassociate floating point addition only when the user
1509 specifies unsafe math optimizations. */
1510 if (FLOAT_MODE_P (mode)
1511 && flag_unsafe_math_optimizations)
1513 tem = simplify_associative_operation (code, mode, op0, op1);
1521 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1522 using cc0, in which case we want to leave it as a COMPARE
1523 so we can distinguish it from a register-register-copy.
1525 In IEEE floating point, x-0 is not the same as x. */
1527 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1528 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1529 && trueop1 == CONST0_RTX (mode))
1533 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1534 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1535 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1536 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1538 rtx xop00 = XEXP (op0, 0);
1539 rtx xop10 = XEXP (op1, 0);
1542 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1544 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1545 && GET_MODE (xop00) == GET_MODE (xop10)
1546 && REGNO (xop00) == REGNO (xop10)
1547 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1548 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1555 /* We can't assume x-x is 0 even with non-IEEE floating point,
1556 but since it is zero except in very strange circumstances, we
1557 will treat it as zero with -funsafe-math-optimizations. */
1558 if (rtx_equal_p (trueop0, trueop1)
1559 && ! side_effects_p (op0)
1560 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1561 return CONST0_RTX (mode);
1563 /* Change subtraction from zero into negation. (0 - x) is the
1564 same as -x when x is NaN, infinite, or finite and nonzero.
1565 But if the mode has signed zeros, and does not round towards
1566 -infinity, then 0 - 0 is 0, not -0. */
1567 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1568 return simplify_gen_unary (NEG, mode, op1, mode);
1570 /* (-1 - a) is ~a. */
1571 if (trueop0 == constm1_rtx)
1572 return simplify_gen_unary (NOT, mode, op1, mode);
1574 /* Subtracting 0 has no effect unless the mode has signed zeros
1575 and supports rounding towards -infinity. In such a case, 0 - 0 is -0. */
1577 if (!(HONOR_SIGNED_ZEROS (mode)
1578 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1579 && trueop1 == CONST0_RTX (mode))
1582 /* See if this is something like X * C - X or vice versa or
1583 if the multiplication is written as a shift. If so, we can
1584 distribute and make a new multiply, shift, or maybe just
1585 have X (if C is 2 in the example above). But don't make a
1586 real multiply if we didn't have one before. */
1588 if (! FLOAT_MODE_P (mode))
1590 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1591 rtx lhs = op0, rhs = op1;
1594 if (GET_CODE (lhs) == NEG)
1595 coeff0 = -1, lhs = XEXP (lhs, 0);
1596 else if (GET_CODE (lhs) == MULT
1597 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1599 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1602 else if (GET_CODE (lhs) == ASHIFT
1603 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1604 && INTVAL (XEXP (lhs, 1)) >= 0
1605 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1607 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1608 lhs = XEXP (lhs, 0);
1611 if (GET_CODE (rhs) == NEG)
1612 coeff1 = - 1, rhs = XEXP (rhs, 0);
1613 else if (GET_CODE (rhs) == MULT
1614 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1616 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1619 else if (GET_CODE (rhs) == ASHIFT
1620 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1621 && INTVAL (XEXP (rhs, 1)) >= 0
1622 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1624 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1625 rhs = XEXP (rhs, 0);
1628 if (rtx_equal_p (lhs, rhs))
1630 tem = simplify_gen_binary (MULT, mode, lhs,
1631 GEN_INT (coeff0 - coeff1));
1632 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
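/* E.g., (minus (mult X 2) X) has coeff0 == 2 and coeff1 == 1, so it
   reduces via (mult X 1) to plain X -- the `C is 2' case mentioned
   above. */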
1636 /* (a - (-b)) -> (a + b). True even for IEEE. */
1637 if (GET_CODE (op1) == NEG)
1638 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1640 /* (-x - c) may be simplified as (-c - x). */
1641 if (GET_CODE (op0) == NEG
1642 && (GET_CODE (op1) == CONST_INT
1643 || GET_CODE (op1) == CONST_DOUBLE))
1645 tem = simplify_unary_operation (NEG, mode, op1, mode);
1647 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1650 /* If one of the operands is a PLUS or a MINUS, see if we can
1651 simplify this by the associative law.
1652 Don't use the associative law for floating point.
1653 The inaccuracy makes it nonassociative,
1654 and subtle programs can break if operations are associated. */
1656 if (INTEGRAL_MODE_P (mode)
1657 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1658 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1659 || (GET_CODE (op0) == CONST
1660 && GET_CODE (XEXP (op0, 0)) == PLUS)
1661 || (GET_CODE (op1) == CONST
1662 && GET_CODE (XEXP (op1, 0)) == PLUS))
1663 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1666 /* Don't let a relocatable value get a negative coeff. */
1667 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1668 return simplify_gen_binary (PLUS, mode,
1670 neg_const_int (mode, op1));
1672 /* (x - (x & y)) -> (x & ~y) */
1673 if (GET_CODE (op1) == AND)
1675 if (rtx_equal_p (op0, XEXP (op1, 0)))
1677 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1678 GET_MODE (XEXP (op1, 1)));
1679 return simplify_gen_binary (AND, mode, op0, tem);
1681 if (rtx_equal_p (op0, XEXP (op1, 1)))
1683 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1684 GET_MODE (XEXP (op1, 0)));
1685 return simplify_gen_binary (AND, mode, op0, tem);
1691 if (trueop1 == constm1_rtx)
1692 return simplify_gen_unary (NEG, mode, op0, mode);
1694 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1695 x is NaN, since x * 0 is then also NaN. Nor is it valid
1696 when the mode has signed zeros, since multiplying a negative
1697 number by 0 will give -0, not 0. */
1698 if (!HONOR_NANS (mode)
1699 && !HONOR_SIGNED_ZEROS (mode)
1700 && trueop1 == CONST0_RTX (mode)
1701 && ! side_effects_p (op0))
1704 /* In IEEE floating point, x*1 is not equivalent to x for signaling NaNs. */
1706 if (!HONOR_SNANS (mode)
1707 && trueop1 == CONST1_RTX (mode))
1710 /* Convert multiply by constant power of two into shift unless
1711 we are still generating RTL. This test is a kludge. */
1712 if (GET_CODE (trueop1) == CONST_INT
1713 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1714 /* If the mode is larger than the host word size, and the
1715 uppermost bit is set, then this isn't a power of two due
1716 to implicit sign extension. */
1717 && (width <= HOST_BITS_PER_WIDE_INT
1718 || val != HOST_BITS_PER_WIDE_INT - 1)
1719 && ! rtx_equal_function_value_matters)
1720 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
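/* E.g., (mult X 8) becomes (ashift X 3) here. */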
1722 /* x*2 is x+x and x*(-1) is -x */
1723 if (GET_CODE (trueop1) == CONST_DOUBLE
1724 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1725 && GET_MODE (op0) == mode)
1728 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1730 if (REAL_VALUES_EQUAL (d, dconst2))
1731 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1733 if (REAL_VALUES_EQUAL (d, dconstm1))
1734 return simplify_gen_unary (NEG, mode, op0, mode);
1737 /* Reassociate multiplication, but for floating point MULTs
1738 only when the user specifies unsafe math optimizations. */
1739 if (! FLOAT_MODE_P (mode)
1740 || flag_unsafe_math_optimizations)
1742 tem = simplify_associative_operation (code, mode, op0, op1);
1749 if (trueop1 == const0_rtx)
1751 if (GET_CODE (trueop1) == CONST_INT
1752 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1753 == GET_MODE_MASK (mode)))
1755 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1757 /* A | (~A) -> -1 */
1758 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1759 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1760 && ! side_effects_p (op0)
1761 && GET_MODE_CLASS (mode) != MODE_CC)
1763 tem = simplify_associative_operation (code, mode, op0, op1);
1769 if (trueop1 == const0_rtx)
1771 if (GET_CODE (trueop1) == CONST_INT
1772 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1773 == GET_MODE_MASK (mode)))
1774 return simplify_gen_unary (NOT, mode, op0, mode);
1775 if (trueop0 == trueop1 && ! side_effects_p (op0)
1776 && GET_MODE_CLASS (mode) != MODE_CC)
1778 tem = simplify_associative_operation (code, mode, op0, op1);
1784 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1786 if (GET_CODE (trueop1) == CONST_INT
1787 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1788 == GET_MODE_MASK (mode)))
1790 if (trueop0 == trueop1 && ! side_effects_p (op0)
1791 && GET_MODE_CLASS (mode) != MODE_CC)
1794 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1795 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1796 && ! side_effects_p (op0)
1797 && GET_MODE_CLASS (mode) != MODE_CC)
1799 tem = simplify_associative_operation (code, mode, op0, op1);
1805 /* Convert divide by power of two into shift (divide by 1 handled below). */
1807 if (GET_CODE (trueop1) == CONST_INT
1808 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1809 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1811 /* Fall through.... */
1814 if (trueop1 == CONST1_RTX (mode))
1816 /* On some platforms DIV uses narrower mode than its operands. */
1818 rtx x = gen_lowpart_common (mode, op0);
1821 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1822 return gen_lowpart_SUBREG (mode, op0);
1827 /* Maybe change 0 / x to 0. This transformation isn't safe for
1828 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1829 Nor is it safe for modes with signed zeros, since dividing
1830 0 by a negative number gives -0, not 0. */
1831 if (!HONOR_NANS (mode)
1832 && !HONOR_SIGNED_ZEROS (mode)
1833 && trueop0 == CONST0_RTX (mode)
1834 && ! side_effects_p (op1))
1837 /* Change division by a constant into multiplication. Only do
1838 this with -funsafe-math-optimizations. */
1839 else if (GET_CODE (trueop1) == CONST_DOUBLE
1840 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1841 && trueop1 != CONST0_RTX (mode)
1842 && flag_unsafe_math_optimizations)
1845 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1847 if (! REAL_VALUES_EQUAL (d, dconst0))
1849 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1850 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1851 return simplify_gen_binary (MULT, mode, op0, tem);
1857 /* Handle modulus by power of two (mod with 1 handled below). */
1858 if (GET_CODE (trueop1) == CONST_INT
1859 && exact_log2 (INTVAL (trueop1)) > 0)
1860 return simplify_gen_binary (AND, mode, op0,
1861 GEN_INT (INTVAL (op1) - 1));
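/* E.g., (umod X 16) becomes (and X 15); the mask trick is only valid
   for unsigned modulus. */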
1863 /* Fall through.... */
1866 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1867 && ! side_effects_p (op0) && ! side_effects_p (op1))
1874 /* Rotating ~0 always results in ~0. */
1875 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1876 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1877 && ! side_effects_p (op1))
1880 /* Fall through.... */
1884 if (trueop1 == const0_rtx)
1886 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1891 if (width <= HOST_BITS_PER_WIDE_INT
1892 && GET_CODE (trueop1) == CONST_INT
1893 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1894 && ! side_effects_p (op0))
1896 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1898 tem = simplify_associative_operation (code, mode, op0, op1);
1904 if (width <= HOST_BITS_PER_WIDE_INT
1905 && GET_CODE (trueop1) == CONST_INT
1906 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1907 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1908 && ! side_effects_p (op0))
1910 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1912 tem = simplify_associative_operation (code, mode, op0, op1);
1918 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1920 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1922 tem = simplify_associative_operation (code, mode, op0, op1);
1928 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1930 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1932 tem = simplify_associative_operation (code, mode, op0, op1);
1941 /* ??? There are simplifications that can be done. */
1945 if (!VECTOR_MODE_P (mode))
1947 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1949 != GET_MODE_INNER (GET_MODE (trueop0)))
1950 || GET_CODE (trueop1) != PARALLEL
1951 || XVECLEN (trueop1, 0) != 1
1952 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1955 if (GET_CODE (trueop0) == CONST_VECTOR)
1956 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1960 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1961 || (GET_MODE_INNER (mode)
1962 != GET_MODE_INNER (GET_MODE (trueop0)))
1963 || GET_CODE (trueop1) != PARALLEL)
1966 if (GET_CODE (trueop0) == CONST_VECTOR)
1968 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1969 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1970 rtvec v = rtvec_alloc (n_elts);
1973 if (XVECLEN (trueop1, 0) != (int) n_elts)
1975 for (i = 0; i < n_elts; i++)
1977 rtx x = XVECEXP (trueop1, 0, i);
1979 if (GET_CODE (x) != CONST_INT)
1981 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1984 return gen_rtx_CONST_VECTOR (mode, v);
1990 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1991 ? GET_MODE (trueop0)
1992 : GET_MODE_INNER (mode));
1993 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1994 ? GET_MODE (trueop1)
1995 : GET_MODE_INNER (mode));
1997 if (!VECTOR_MODE_P (mode)
1998 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1999 != GET_MODE_SIZE (mode)))
2002 if ((VECTOR_MODE_P (op0_mode)
2003 && (GET_MODE_INNER (mode)
2004 != GET_MODE_INNER (op0_mode)))
2005 || (!VECTOR_MODE_P (op0_mode)
2006 && GET_MODE_INNER (mode) != op0_mode))
2009 if ((VECTOR_MODE_P (op1_mode)
2010 && (GET_MODE_INNER (mode)
2011 != GET_MODE_INNER (op1_mode)))
2012 || (!VECTOR_MODE_P (op1_mode)
2013 && GET_MODE_INNER (mode) != op1_mode))
2016 if ((GET_CODE (trueop0) == CONST_VECTOR
2017 || GET_CODE (trueop0) == CONST_INT
2018 || GET_CODE (trueop0) == CONST_DOUBLE)
2019 && (GET_CODE (trueop1) == CONST_VECTOR
2020 || GET_CODE (trueop1) == CONST_INT
2021 || GET_CODE (trueop1) == CONST_DOUBLE))
2023 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2024 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2025 rtvec v = rtvec_alloc (n_elts);
2027 unsigned in_n_elts = 1;
2029 if (VECTOR_MODE_P (op0_mode))
2030 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2031 for (i = 0; i < n_elts; i++)
2035 if (!VECTOR_MODE_P (op0_mode))
2036 RTVEC_ELT (v, i) = trueop0;
2038 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2042 if (!VECTOR_MODE_P (op1_mode))
2043 RTVEC_ELT (v, i) = trueop1;
2045 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2050 return gen_rtx_CONST_VECTOR (mode, v);
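/* E.g., (vec_concat:V2SI (const_int 1) (const_int 2)) folds here to
   (const_vector:V2SI [1 2]). */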
2062 /* Get the integer argument values in two forms:
2063 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2065 arg0 = INTVAL (trueop0);
2066 arg1 = INTVAL (trueop1);
2068 if (width < HOST_BITS_PER_WIDE_INT)
2070 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2071 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2074 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2075 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2078 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2079 arg1s |= ((HOST_WIDE_INT) (-1) << width);
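/* E.g., in QImode the zero-extended value 0x80 in arg0 corresponds to
   the sign-extended value -128 in arg0s. */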
2087 /* Compute the value of the arithmetic. */
2092 val = arg0s + arg1s;
2096 val = arg0s - arg1s;
2100 val = arg0s * arg1s;
2105 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2108 val = arg0s / arg1s;
2113 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2116 val = arg0s % arg1s;
2121 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2124 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2129 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2132 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2148 /* If shift count is undefined, don't fold it; let the machine do
2149 what it wants. But truncate it if the machine will do that. */
2153 if (SHIFT_COUNT_TRUNCATED)
2156 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2163 if (SHIFT_COUNT_TRUNCATED)
2166 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2173 if (SHIFT_COUNT_TRUNCATED)
2176 val = arg0s >> arg1;
2178 /* The bootstrap compiler may not have sign-extended the right shift.
2179 Manually extend the sign to ensure that bootstrap cc matches gcc. */
2180 if (arg0s < 0 && arg1 > 0)
2181 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2190 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2191 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2199 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2200 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
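/* E.g., rotating the QImode value 0x81 left by 1 gives 0x03: the
   shifted-out high bit re-enters at the bottom. */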
2204 /* Do nothing here. */
2208 val = arg0s <= arg1s ? arg0s : arg1s;
2212 val = ((unsigned HOST_WIDE_INT) arg0
2213 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2217 val = arg0s > arg1s ? arg0s : arg1s;
2221 val = ((unsigned HOST_WIDE_INT) arg0
2222 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2229 /* ??? There are simplifications that can be done. */
2236 val = trunc_int_for_mode (val, mode);
2238 return GEN_INT (val);
2241 /* Simplify a PLUS or MINUS, at least one of whose operands may be another PLUS or MINUS.
2244 Rather than test for specific cases, we do this by a brute-force method
2245 and do all possible simplifications until no more changes occur. Then
2246 we rebuild the operation.
2248 If FORCE is true, then always generate the rtx. This is used to
2249 canonicalize stuff emitted from simplify_gen_binary. Note that this
2250 can still fail if the rtx is too complex. It won't fail just because
2251 the result is not 'simpler' than the input, however. */
2253 struct simplify_plus_minus_op_data
2260 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2262 const struct simplify_plus_minus_op_data *d1 = p1;
2263 const struct simplify_plus_minus_op_data *d2 = p2;
2265 return (commutative_operand_precedence (d2->op)
2266 - commutative_operand_precedence (d1->op));
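/* Sorting in descending precedence order puts complex operands first
   and constants last; the CONST handling after the qsort in
   simplify_plus_minus relies on this. */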
2270 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2273 struct simplify_plus_minus_op_data ops[8];
2275 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2279 memset (ops, 0, sizeof ops);
2281 /* Set up the two operands and then expand them until nothing has been
2282 changed. If we run out of room in our array, give up; this should
2283 almost never happen. */
2288 ops[1].neg = (code == MINUS);
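/* E.g., (minus a (plus b c)) starts out as the pair {+a, -(plus b c)},
   and the expansion loop below rewrites it as {+a, -b, -c}. */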
2294 for (i = 0; i < n_ops; i++)
2296 rtx this_op = ops[i].op;
2297 int this_neg = ops[i].neg;
2298 enum rtx_code this_code = GET_CODE (this_op);
2307 ops[n_ops].op = XEXP (this_op, 1);
2308 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2311 ops[i].op = XEXP (this_op, 0);
2317 ops[i].op = XEXP (this_op, 0);
2318 ops[i].neg = ! this_neg;
2324 && GET_CODE (XEXP (this_op, 0)) == PLUS
2325 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2326 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2328 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2329 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2330 ops[n_ops].neg = this_neg;
2338 /* ~a -> (-a - 1) */
2341 ops[n_ops].op = constm1_rtx;
2342 ops[n_ops++].neg = this_neg;
2343 ops[i].op = XEXP (this_op, 0);
2344 ops[i].neg = !this_neg;
2352 ops[i].op = neg_const_int (mode, this_op);
2365 /* If we only have two operands, we can't do anything. */
2366 if (n_ops <= 2 && !force)
2369 /* Count the number of CONSTs we didn't split above. */
2370 for (i = 0; i < n_ops; i++)
2371 if (GET_CODE (ops[i].op) == CONST)
2374 /* Now simplify each pair of operands until nothing changes. The first
2375 time through just simplify constants against each other. */
2382 for (i = 0; i < n_ops - 1; i++)
2383 for (j = i + 1; j < n_ops; j++)
2385 rtx lhs = ops[i].op, rhs = ops[j].op;
2386 int lneg = ops[i].neg, rneg = ops[j].neg;
2388 if (lhs != 0 && rhs != 0
2389 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2391 enum rtx_code ncode = PLUS;
2397 tem = lhs, lhs = rhs, rhs = tem;
2399 else if (swap_commutative_operands_p (lhs, rhs))
2400 tem = lhs, lhs = rhs, rhs = tem;
2402 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2404 /* Reject "simplifications" that just wrap the two
2405 arguments in a CONST. Failure to do so can result
2406 in infinite recursion with simplify_binary_operation
2407 when it calls us to simplify CONST operations. */
2409 && ! (GET_CODE (tem) == CONST
2410 && GET_CODE (XEXP (tem, 0)) == ncode
2411 && XEXP (XEXP (tem, 0), 0) == lhs
2412 && XEXP (XEXP (tem, 0), 1) == rhs)
2413 /* Don't allow -x + -1 -> ~x simplifications in the
2414 first pass. This allows us the chance to combine
2415 the -1 with other constants. */
2417 && GET_CODE (tem) == NOT
2418 && XEXP (tem, 0) == rhs))
2421 if (GET_CODE (tem) == NEG)
2422 tem = XEXP (tem, 0), lneg = !lneg;
2423 if (GET_CODE (tem) == CONST_INT && lneg)
2424 tem = neg_const_int (mode, tem), lneg = 0;
2428 ops[j].op = NULL_RTX;
2438 /* Pack all the operands to the lower-numbered entries. */
2439 for (i = 0, j = 0; j < n_ops; j++)
2444 /* Sort the operations based on swap_commutative_operands_p. */
2445 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2447 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2449 && GET_CODE (ops[1].op) == CONST_INT
2450 && CONSTANT_P (ops[0].op)
2452 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }
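  /* Illustrative note (editor's sketch, not in the original source):
     if the two trailing entries are (symbol_ref "s") and (const_int 8)
     with equal negation flags, plus_constant wraps them as
     (const (plus (symbol_ref "s") (const_int 8))) without recursing
     through simplify_binary_operation.  */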
  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first, if possible.  */
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
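  /* Illustrative note on simplify_plus_minus as a whole (editor's
     sketch, not in the original source): (plus (minus a b) (minus c a))
     flattens to { a, -b, c, -a }, the a/-a pair cancels to zero in the
     combination loop, and the result is rebuilt as (minus c b).  */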
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem, trueop0, trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
          || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  /* For integer comparisons of A and B, we may be able to simplify A - B
     and then simplify a comparison of that with zero.  If A and B are
     both either a register or a CONST_INT, this can't help; testing for
     these cases will prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around
     zero.  ANSI C defines unsigned operations such that they never
     overflow, and thus such cases cannot be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
            && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this for == or != if tem is a nonzero address.  */
      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
                                          mode, tem, const0_rtx);
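  /* Illustrative note (editor's sketch, not in the original source):
     for (eq (plus (reg) (const_int 2)) (plus (reg) (const_int 1))),
     the MINUS above folds to (const_int 1), and the recursive call
     compares 1 against zero, reducing the whole test to const0_rtx.  */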
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
           && GET_CODE (trueop1) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ: case UNLT: case UNGT: case UNLE: case UNGE:
          case NE: case UNORDERED:
            return const_true_rtx;
          case EQ: case LT: case GT: case LE: case GE: case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (trueop0) == CONST_DOUBLE
               || GET_CODE (trueop0) == CONST_INT)
           && (GET_CODE (trueop1) == CONST_DOUBLE
               || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
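  /* Illustrative note (editor's sketch, not in the original source):
     the (high, low) pairs above behave as 2*HOST_BITS_PER_WIDE_INT-bit
     numbers: the signed high words decide the order and the unsigned
     low words break ties, so e.g. {h0s = 0, l0u = 5} is less than
     {h1s = 1, l1u = 0}.  */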
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
        {
        case EQ:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const0_rtx;
          break;
        case NE:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const_true_rtx;
          break;
        case GEU:
          /* Unsigned values are never negative.  */
          if (trueop1 == const0_rtx)
            return const_true_rtx;
          break;
        case LTU:
          if (trueop1 == const0_rtx)
            return const0_rtx;
          break;
        case LEU:
          /* Unsigned values are never greater than the largest
             unsigned value.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const_true_rtx;
          break;
        case GTU:
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const0_rtx;
          break;
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const0_rtx;
            }
          break;
        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;
        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          if (trueop1 == CONST0_RTX (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;
        default:
          break;
        }

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:  return equal ? const_true_rtx : const0_rtx;
    case NE:  return ! equal ? const_true_rtx : const0_rtx;
    case LT:  return op0lt ? const_true_rtx : const0_rtx;
    case GT:  return op1lt ? const_true_rtx : const0_rtx;
    case LTU: return op0ltu ? const_true_rtx : const0_rtx;
    case GTU: return op1ltu ? const_true_rtx : const0_rtx;
    case LE:  return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:  return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU: return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU: return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED: return const_true_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return GEN_INT (val);
        }
      break;
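      /* Illustrative note (editor's sketch, not in the original source):
         (zero_extract (const_int 0xf0) (const_int 4) (const_int 4)) on a
         !BITS_BIG_ENDIAN target shifts right by 4 and masks with 0xf,
         yielding (const_int 15).  */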
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;
      if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
                                                XEXP (op0, 0), XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp == const0_rtx)
            return op2;
          else if (temp == const_true_rtx)
            return op1;

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
            }
        }
      break;
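      /* Illustrative note (editor's sketch, not in the original source):
         with STORE_FLAG_VALUE == 1,
         (if_then_else (lt a b) (const_int 1) (const_int 0)) becomes
         (lt a b) itself, and the reversed form with t == 0, f == 1
         becomes (ge a b) via reversed_comparison_code.  */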
    case VEC_MERGE:
      if (GET_MODE (op0) != mode
          || GET_MODE (op1) != mode
          || !VECTOR_MODE_P (mode))
        abort ();
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;

          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;
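      /* Illustrative note (editor's sketch, not in the original source):
         a V2SImode (vec_merge {1, 2} {3, 4} (const_int 1)) selects
         element 0 from op0 and element 1 from op1, folding to the
         constant vector {1, 4}.  */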
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start, i, elem;
  int num_elem, elem_bitsize;
  rtx *elems;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;
  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }

  if (BITS_PER_UNIT % value_bit != 0)
    abort ();  /* Too complicated; reducing value_bit may help.  */
  if (elem_bitsize % BITS_PER_UNIT != 0)
    abort ();  /* I don't know how to handle endianness of sub-units.  */
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }
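      /* Illustrative note (editor's sketch, not in the original source):
         for a 2 x 32-bit vector on a big-endian, big-words target with
         UNITS_PER_WORD == 4, element 0 gets ibyte == 4, hence bytele == 4,
         so its bits land in value[4..7]; value[] itself always stays
         little-endian with byte 0 least significant.  */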
      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
                abort ();

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }
              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < max_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              if (bitsize > elem_bitsize)
                abort ();
              if (bitsize % value_bit != 0)
                abort ();

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          abort ();
        }
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  if (byte >= GET_MODE_SIZE (innermode))
    abort ();

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
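  /* Illustrative note (editor's sketch, not in the original source):
     reading the high SImode word of a DImode constant on a little-endian
     target arrives here with byte == 4, so value_start becomes 4 and
     repacking starts at value[4].  */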
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  if (elem_bitsize % value_bit != 0)
    abort ();
  if (elem_bitsize + value_start * value_bit > max_bitsize)
    abort ();

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
                     << (i - HOST_BITS_PER_WIDE_INT));

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else
              elems[elem] = immed_double_const (lo, hi, outer_submode);
          }
          break;
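          /* Illustrative note (editor's sketch, not in the original
             source): with value[] holding { 0x78, 0x56, 0x34, 0x12 } and
             outer_submode == SImode, LO accumulates to 0x12345678 and
             gen_int_mode produces the corresponding CONST_INT.  */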
        case MODE_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  An irritating exception is the paradoxical subreg,
         where we define SUBREG_BYTE to be 0.  On big-endian machines,
         this value should be negative.  For a moment, undo this
         exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
                             GET_MODE (SUBREG_REG (op)),
                             final_offset);
      if (new)
        return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
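  /* Illustrative note (editor's sketch, not in the original source):
     (subreg:QI (subreg:HI (reg:SI) 0) 0) recurses on the inner register
     with the combined offset and folds to (subreg:QI (reg:SI) 0).  */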
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
          || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
              ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
                                        byte, outermode))
    {
      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
      int final_regno = subreg_hard_regno (tem, 0);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
        {
          rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis,
             which cannot grok partial registers anyway.  */
          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      /* We can at least simplify it by referring directly to the
         relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }
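  /* Illustrative note (editor's sketch, not in the original source):
     (subreg:SF (concat:SC a b) 4) has BYTE past the unit size, so it
     selects the imaginary part and simplifies to B.  */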
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
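  /* Illustrative note (editor's sketch, not in the original source):
     (subreg:QI (zero_extend:SI (reg:QI r)) 0) folds to (reg:QI r) by the
     lowpart rule above, while on a little-endian target
     (subreg:QI (zero_extend:SI (reg:QI r)) 2) reads bits 16..23 of a
     value whose source had only 8 bits, so it folds to (const_int 0).  */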
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();
  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();
  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx temp;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */
    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));
    case '<':
      temp = simplify_relational_operation (code,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));
#ifdef FLOAT_STORE_FLAG_VALUE
      if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          if (temp == const0_rtx)
            temp = CONST0_RTX (mode);
          else
            temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
                                                 mode);
        }
#endif
      return temp;

    case 'x':
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX
          && CONSTANT_P (XEXP (x, 0)))
        return const1_rtx;
      break;

    case 'o':
      /* Convert (lo_sum (high FOO) FOO) to FOO.  */
      if (code == LO_SUM
          && GET_CODE (XEXP (x, 0)) == HIGH
          && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
        return XEXP (x, 1);