1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
/* NOTE(review): this extraction has dropped interior lines (the embedded
   original numbering is discontinuous) -- the `static rtx` return type and
   the function braces are missing here; surviving lines kept verbatim.  */
69 neg_const_int (enum machine_mode mode, const_rtx i)
/* gen_int_mode truncates the negated value to MODE, which is why INT_MIN
   negation cannot overflow into an invalid CONST_INT.  */
71 return gen_int_mode (- INTVAL (i), mode);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE.  Only meaningful for
   MODE_INT modes; returns whether X's value equals 1 << (width - 1).  */
/* NOTE(review): interior lines are missing from this extraction (braces,
   the early-return bodies, and the CONST_INT branch body); the surviving
   lines are kept verbatim.  */
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
80 unsigned HOST_WIDE_INT val;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
86 width = GET_MODE_BITSIZE (mode);
/* Narrow constants fit in a single HOST_WIDE_INT ... */
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
/* ... wider ones live in a CONST_DOUBLE whose low word must be zero
   for the value to be exactly the sign bit.  */
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
/* Mask off bits above WIDTH before comparing against the sign bit.  */
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds; if not, build the rtx explicitly.  */
/* NOTE(review): interior lines missing in this extraction (return type,
   OP1 parameter line, braces, and the `if (tem) return tem;` after the
   fold attempt); surviving lines kept verbatim.  */
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X.  */
/* NOTE(review): interior lines missing in this extraction (return type,
   braces, several switch cases and local declarations); surviving lines
   kept verbatim.  */
133 avoid_constant_pool_reference (rtx x)
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
145 /* Handle float extensions of constant pool references. */
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
/* BLKmode MEMs cannot be replaced by a scalar constant.  */
161 if (GET_MODE (x) == BLKmode)
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
/* A LO_SUM's second operand carries the full symbolic address.  */
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
205 /* Make a unary operation by first seeing if it folds and otherwise making
206 the specified operation explicitly.  */
/* NOTE(review): interior lines missing in this extraction (return type,
   braces, the `return tem;` body of the if); surviving lines verbatim.  */
209 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
210 enum machine_mode op_mode)
214 /* If this simplifies, use it. */
215 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
218 return gen_rtx_fmt_e (code, mode, op)
221 /* Likewise for ternary operations: fold if possible, else build the rtx.  */
/* NOTE(review): interior lines missing in this extraction (return type,
   braces, the tail of the simplify_ternary_operation call and its
   `return tem;`); surviving lines kept verbatim.  */
224 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
225 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
229 /* If this simplifies, use it. */
230 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
234 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
237 /* Likewise, for relational operations.
238 CMP_MODE specifies mode comparison is done in.  */
/* NOTE(review): interior lines missing in this extraction (return type,
   braces, the tail of the call and its `return tem;`); surviving lines
   kept verbatim.  */
241 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
242 enum machine_mode cmp_mode, rtx op0, rtx op1)
246 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
250 return gen_rtx_fmt_ee (code, mode, op0, op1);
253 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
254 resulting RTX.  Return a new RTX which is as simplified as possible.
   Recurses structurally by the RTX class of X's code.  */
/* NOTE(review): interior lines missing in this extraction (return type,
   braces, several `case` labels of the switch, `return x;` fall-throughs,
   and local op0/op1/op2 declarations); surviving lines kept verbatim.  */
257 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
259 enum rtx_code code = GET_CODE (x);
260 enum machine_mode mode = GET_MODE (x);
261 enum machine_mode op_mode;
264 /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
265 to build a new expression substituting recursively.  If we can't do
266 anything, return our input. */
271 switch (GET_RTX_CLASS (code))
/* Unary: substitute into the single operand.  */
275 op_mode = GET_MODE (op0);
276 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
277 if (op0 == XEXP (x, 0))
279 return simplify_gen_unary (code, mode, op0, op_mode);
/* Binary / commutative arith: substitute into both operands.  */
283 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
284 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
285 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
287 return simplify_gen_binary (code, mode, op0, op1);
290 case RTX_COMM_COMPARE:
/* Comparisons: the comparison mode comes from whichever operand
   has a non-VOID mode.  */
293 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
296 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
298 return simplify_gen_relational (code, mode, op_mode, op0, op1);
301 case RTX_BITFIELD_OPS:
/* Ternary / bitfield ops: substitute into all three operands.  */
303 op_mode = GET_MODE (op0);
304 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
305 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
306 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
307 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
309 if (op_mode == VOIDmode)
310 op_mode = GET_MODE (op0);
311 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
314 /* The only case we try to handle is a SUBREG. */
317 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
318 if (op0 == SUBREG_REG (x))
320 op0 = simplify_gen_subreg (GET_MODE (x), op0,
321 GET_MODE (SUBREG_REG (x)),
323 return op0 ? op0 : x;
/* MEM: substitute into the address and rebuild (non-validating).  */
330 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
331 if (op0 == XEXP (x, 0))
333 return replace_equiv_address_nv (x, op0);
335 else if (code == LO_SUM)
337 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
338 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
340 /* (lo_sum (high x) x) -> x */
341 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
344 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
346 return gen_rtx_LO_SUM (mode, op0, op1);
/* REG: compare by value, not pointer identity, so distinct rtx with
   the same register still match OLD_RTX.  */
348 else if (code == REG)
350 if (rtx_equal_p (x, old_rtx))
361 /* Try to simplify a unary operation CODE whose output mode is to be
362 MODE with input operand OP whose mode was originally OP_MODE.
363 Return zero if no simplification can be made.  Dispatches to the
   constant folder first, then to the non-constant pattern matcher.  */
/* NOTE(review): interior lines missing in this extraction (return type,
   braces, local declarations, and the `if (tem) return tem;` between the
   two calls); surviving lines kept verbatim.  */
365 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
366 rtx op, enum machine_mode op_mode)
370 if (GET_CODE (op) == CONST)
/* Look through constant-pool MEMs so constant folding can apply.  */
373 trueop = avoid_constant_pool_reference (op);
375 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
379 return simplify_unary_operation_1 (code, mode, op);
382 /* Perform some simplifications we can do even if the operands
   aren't constant.  One big switch over CODE (NOT, NEG, TRUNCATE,
   FLOAT_TRUNCATE, FLOAT_EXTEND, ABS, FFS, POPCOUNT, PARITY, BSWAP,
   FLOAT, SIGN_EXTEND, ZERO_EXTEND), each arm applying algebraic
   identities.  Returns 0 (presumably -- TODO confirm, the final return
   is not visible here) when no simplification applies.  */
/* NOTE(review): this extraction is missing many interior lines (the
   embedded original numbering is discontinuous): the switch statement,
   its `case` labels, braces, `break`s and several sub-expressions are
   absent.  The surviving lines are kept verbatim; do not treat this
   block as compilable as-is.  */
385 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
387 enum rtx_code reversed;
393 /* (not (not X)) == X. */
394 if (GET_CODE (op) == NOT)
397 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
398 comparison is all ones. */
399 if (COMPARISON_P (op)
400 && (mode == BImode || STORE_FLAG_VALUE == -1)
401 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
402 return simplify_gen_relational (reversed, mode, VOIDmode,
403 XEXP (op, 0), XEXP (op, 1));
405 /* (not (plus X -1)) can become (neg X). */
406 if (GET_CODE (op) == PLUS
407 && XEXP (op, 1) == constm1_rtx)
408 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
410 /* Similarly, (not (neg X)) is (plus X -1). */
411 if (GET_CODE (op) == NEG)
412 return plus_constant (XEXP (op, 0), -1);
414 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
415 if (GET_CODE (op) == XOR
416 && GET_CODE (XEXP (op, 1)) == CONST_INT
417 && (temp = simplify_unary_operation (NOT, mode,
418 XEXP (op, 1), mode)) != 0)
419 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
421 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
422 if (GET_CODE (op) == PLUS
423 && GET_CODE (XEXP (op, 1)) == CONST_INT
424 && mode_signbit_p (mode, XEXP (op, 1))
425 && (temp = simplify_unary_operation (NOT, mode,
426 XEXP (op, 1), mode)) != 0)
427 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
430 /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
431 operands other than 1, but that is not valid.  We could do a
432 similar simplification for (not (lshiftrt C X)) where C is
433 just the sign bit, but this doesn't seem common enough to
435 if (GET_CODE (op) == ASHIFT
436 && XEXP (op, 0) == const1_rtx)
438 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
439 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
442 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
443 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
444 so we can perform the above simplification. */
446 if (STORE_FLAG_VALUE == -1
447 && GET_CODE (op) == ASHIFTRT
448 && GET_CODE (XEXP (op, 1)) == CONST_INT
449 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
450 return simplify_gen_relational (GE, mode, VOIDmode,
451 XEXP (op, 0), const0_rtx);
/* Same (ashift 1 X) pattern, but hidden under a lowpart SUBREG.  */
454 if (GET_CODE (op) == SUBREG
455 && subreg_lowpart_p (op)
456 && (GET_MODE_SIZE (GET_MODE (op))
457 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
458 && GET_CODE (SUBREG_REG (op)) == ASHIFT
459 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
461 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
464 x = gen_rtx_ROTATE (inner_mode,
465 simplify_gen_unary (NOT, inner_mode, const1_rtx,
467 XEXP (SUBREG_REG (op), 1));
468 return rtl_hooks.gen_lowpart_no_emit (mode, x);
471 /* Apply De Morgan's laws to reduce number of patterns for machines
472 with negating logical insns (and-not, nand, etc.).  If result has
473 only one NOT, put it first, since that is how the patterns are
476 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
478 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
479 enum machine_mode op_mode;
481 op_mode = GET_MODE (in1);
482 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
484 op_mode = GET_MODE (in2);
485 if (op_mode == VOIDmode)
487 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
489 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
492 in2 = in1; in1 = tem;
495 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
501 /* (neg (neg X)) == X. */
502 if (GET_CODE (op) == NEG)
505 /* (neg (plus X 1)) can become (not X). */
506 if (GET_CODE (op) == PLUS
507 && XEXP (op, 1) == const1_rtx)
508 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
510 /* Similarly, (neg (not X)) is (plus X 1). */
511 if (GET_CODE (op) == NOT)
512 return plus_constant (XEXP (op, 0), 1);
514 /* (neg (minus X Y)) can become (minus Y X).  This transformation
515 isn't safe for modes with signed zeros, since if X and Y are
516 both +0, (minus Y X) is the same as (minus X Y).  If the
517 rounding mode is towards +infinity (or -infinity) then the two
518 expressions will be rounded differently. */
519 if (GET_CODE (op) == MINUS
520 && !HONOR_SIGNED_ZEROS (mode)
521 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
522 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
524 if (GET_CODE (op) == PLUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
528 /* (neg (plus A C)) is simplified to (minus -C A). */
529 if (GET_CODE (XEXP (op, 1)) == CONST_INT
530 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
532 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
534 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
537 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
538 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
542 /* (neg (mult A B)) becomes (mult (neg A) B).
543 This works even for floating-point values. */
544 if (GET_CODE (op) == MULT
545 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
547 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
548 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
551 /* NEG commutes with ASHIFT since it is multiplication.  Only do
552 this if we can then eliminate the NEG (e.g., if the operand
554 if (GET_CODE (op) == ASHIFT)
556 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
558 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
561 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
562 C is equal to the width of MODE minus 1. */
563 if (GET_CODE (op) == ASHIFTRT
564 && GET_CODE (XEXP (op, 1)) == CONST_INT
565 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
566 return simplify_gen_binary (LSHIFTRT, mode,
567 XEXP (op, 0), XEXP (op, 1));
569 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
570 C is equal to the width of MODE minus 1. */
571 if (GET_CODE (op) == LSHIFTRT
572 && GET_CODE (XEXP (op, 1)) == CONST_INT
573 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
574 return simplify_gen_binary (ASHIFTRT, mode,
575 XEXP (op, 0), XEXP (op, 1));
577 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
578 if (GET_CODE (op) == XOR
579 && XEXP (op, 1) == const1_rtx
580 && nonzero_bits (XEXP (op, 0), mode) == 1)
581 return plus_constant (XEXP (op, 0), -1);
583 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
584 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
585 if (GET_CODE (op) == LT
586 && XEXP (op, 1) == const0_rtx)
588 enum machine_mode inner = GET_MODE (XEXP (op, 0));
589 int isize = GET_MODE_BITSIZE (inner);
590 if (STORE_FLAG_VALUE == 1)
592 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
593 GEN_INT (isize - 1));
596 if (GET_MODE_BITSIZE (mode) > isize)
597 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
598 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
600 else if (STORE_FLAG_VALUE == -1)
602 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
603 GEN_INT (isize - 1));
606 if (GET_MODE_BITSIZE (mode) > isize)
607 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
608 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
614 /* We can't handle truncation to a partial integer mode here
615 because we don't know the real bitsize of the partial
617 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
620 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
621 if ((GET_CODE (op) == SIGN_EXTEND
622 || GET_CODE (op) == ZERO_EXTEND)
623 && GET_MODE (XEXP (op, 0)) == mode)
626 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
627 (OP:SI foo:SI) if OP is NEG or ABS. */
628 if ((GET_CODE (op) == ABS
629 || GET_CODE (op) == NEG)
630 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
631 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
632 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
633 return simplify_gen_unary (GET_CODE (op), mode,
634 XEXP (XEXP (op, 0), 0), mode);
636 /* (truncate:A (subreg:B (truncate:C X) 0)) is
638 if (GET_CODE (op) == SUBREG
639 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
640 && subreg_lowpart_p (op))
641 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
642 GET_MODE (XEXP (SUBREG_REG (op), 0)));
644 /* If we know that the value is already truncated, we can
645 replace the TRUNCATE with a SUBREG.  Note that this is also
646 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
647 modes we just have to apply a different definition for
648 truncation.  But don't do this for an (LSHIFTRT (MULT ...))
649 since this will cause problems with the umulXi3_highpart
651 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
652 GET_MODE_BITSIZE (GET_MODE (op)))
653 ? (num_sign_bit_copies (op, GET_MODE (op))
654 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
655 - GET_MODE_BITSIZE (mode)))
656 : truncated_to_mode (mode, op))
657 && ! (GET_CODE (op) == LSHIFTRT
658 && GET_CODE (XEXP (op, 0)) == MULT))
659 return rtl_hooks.gen_lowpart_no_emit (mode, op);
661 /* A truncate of a comparison can be replaced with a subreg if
662 STORE_FLAG_VALUE permits.  This is like the previous test,
663 but it works even if the comparison is done in a mode larger
664 than HOST_BITS_PER_WIDE_INT. */
665 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
667 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
668 return rtl_hooks.gen_lowpart_no_emit (mode, op);
/* FLOAT_TRUNCATE: skipped for decimal float modes.  */
672 if (DECIMAL_FLOAT_MODE_P (mode))
675 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
676 if (GET_CODE (op) == FLOAT_EXTEND
677 && GET_MODE (XEXP (op, 0)) == mode)
680 /* (float_truncate:SF (float_truncate:DF foo:XF))
681 = (float_truncate:SF foo:XF).
682 This may eliminate double rounding, so it is unsafe.
684 (float_truncate:SF (float_extend:XF foo:DF))
685 = (float_truncate:SF foo:DF).
687 (float_truncate:DF (float_extend:XF foo:SF))
688 = (float_extend:SF foo:DF). */
689 if ((GET_CODE (op) == FLOAT_TRUNCATE
690 && flag_unsafe_math_optimizations)
691 || GET_CODE (op) == FLOAT_EXTEND)
692 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
694 > GET_MODE_SIZE (mode)
695 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
699 /* (float_truncate (float x)) is (float x) */
700 if (GET_CODE (op) == FLOAT
701 && (flag_unsafe_math_optimizations
702 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
703 && ((unsigned)significand_size (GET_MODE (op))
704 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
705 - num_sign_bit_copies (XEXP (op, 0),
706 GET_MODE (XEXP (op, 0))))))))
707 return simplify_gen_unary (FLOAT, mode,
709 GET_MODE (XEXP (op, 0)));
711 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
712 (OP:SF foo:SF) if OP is NEG or ABS. */
713 if ((GET_CODE (op) == ABS
714 || GET_CODE (op) == NEG)
715 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
716 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
717 return simplify_gen_unary (GET_CODE (op), mode,
718 XEXP (XEXP (op, 0), 0), mode);
720 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
721 is (float_truncate:SF x). */
722 if (GET_CODE (op) == SUBREG
723 && subreg_lowpart_p (op)
724 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
725 return SUBREG_REG (op);
/* FLOAT_EXTEND: likewise skipped for decimal float modes.  */
729 if (DECIMAL_FLOAT_MODE_P (mode))
732 /* (float_extend (float_extend x)) is (float_extend x)
734 (float_extend (float x)) is (float x) assuming that double
735 rounding can't happen.
737 if (GET_CODE (op) == FLOAT_EXTEND
738 || (GET_CODE (op) == FLOAT
739 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
740 && ((unsigned)significand_size (GET_MODE (op))
741 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
742 - num_sign_bit_copies (XEXP (op, 0),
743 GET_MODE (XEXP (op, 0)))))))
744 return simplify_gen_unary (GET_CODE (op), mode,
746 GET_MODE (XEXP (op, 0)));
751 /* (abs (neg <foo>)) -> (abs <foo>) */
752 if (GET_CODE (op) == NEG)
753 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
754 GET_MODE (XEXP (op, 0)));
756 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
758 if (GET_MODE (op) == VOIDmode)
761 /* If operand is something known to be positive, ignore the ABS. */
762 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
763 || ((GET_MODE_BITSIZE (GET_MODE (op))
764 <= HOST_BITS_PER_WIDE_INT)
765 && ((nonzero_bits (op, GET_MODE (op))
767 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
771 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
772 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
773 return gen_rtx_NEG (mode, op);
778 /* (ffs (*_extend <X>)) = (ffs <X>) */
779 if (GET_CODE (op) == SIGN_EXTEND
780 || GET_CODE (op) == ZERO_EXTEND)
781 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
782 GET_MODE (XEXP (op, 0)));
/* POPCOUNT: inner switch on the operand's code.  */
786 switch (GET_CODE (op))
790 /* (popcount (zero_extend <X>)) = (popcount <X>) */
791 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
792 GET_MODE (XEXP (op, 0)));
796 /* Rotations don't affect popcount. */
797 if (!side_effects_p (XEXP (op, 1)))
798 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
799 GET_MODE (XEXP (op, 0)));
/* PARITY: same structure as POPCOUNT above.  */
808 switch (GET_CODE (op))
814 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
815 GET_MODE (XEXP (op, 0)));
819 /* Rotations don't affect parity. */
820 if (!side_effects_p (XEXP (op, 1)))
821 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
822 GET_MODE (XEXP (op, 0)));
831 /* (bswap (bswap x)) -> x. */
832 if (GET_CODE (op) == BSWAP)
837 /* (float (sign_extend <X>)) = (float <X>). */
838 if (GET_CODE (op) == SIGN_EXTEND)
839 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
840 GET_MODE (XEXP (op, 0)));
844 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
845 becomes just the MINUS if its mode is MODE.  This allows
846 folding switch statements on machines using casesi (such as
848 if (GET_CODE (op) == TRUNCATE
849 && GET_MODE (XEXP (op, 0)) == mode
850 && GET_CODE (XEXP (op, 0)) == MINUS
851 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
852 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
855 /* Check for a sign extension of a subreg of a promoted
856 variable, where the promotion is sign-extended, and the
857 target mode is the same as the variable's promotion. */
858 if (GET_CODE (op) == SUBREG
859 && SUBREG_PROMOTED_VAR_P (op)
860 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
861 && GET_MODE (XEXP (op, 0)) == mode)
864 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
865 if (! POINTERS_EXTEND_UNSIGNED
866 && mode == Pmode && GET_MODE (op) == ptr_mode
868 || (GET_CODE (op) == SUBREG
869 && REG_P (SUBREG_REG (op))
870 && REG_POINTER (SUBREG_REG (op))
871 && GET_MODE (SUBREG_REG (op)) == Pmode)))
872 return convert_memory_address (Pmode, op);
877 /* Check for a zero extension of a subreg of a promoted
878 variable, where the promotion is zero-extended, and the
879 target mode is the same as the variable's promotion. */
880 if (GET_CODE (op) == SUBREG
881 && SUBREG_PROMOTED_VAR_P (op)
882 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
883 && GET_MODE (XEXP (op, 0)) == mode)
886 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
887 if (POINTERS_EXTEND_UNSIGNED > 0
888 && mode == Pmode && GET_MODE (op) == ptr_mode
890 || (GET_CODE (op) == SUBREG
891 && REG_P (SUBREG_REG (op))
892 && REG_POINTER (SUBREG_REG (op))
893 && GET_MODE (SUBREG_REG (op)) == Pmode)))
894 return convert_memory_address (Pmode, op);
905 /* Try to compute the value of a unary operation CODE whose output mode is to
906 be MODE with input operand OP whose mode was originally OP_MODE.
907 Return zero if the value cannot be computed. */
909 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
910 rtx op, enum machine_mode op_mode)
912 unsigned int width = GET_MODE_BITSIZE (mode);
914 if (code == VEC_DUPLICATE)
916 gcc_assert (VECTOR_MODE_P (mode));
917 if (GET_MODE (op) != VOIDmode)
919 if (!VECTOR_MODE_P (GET_MODE (op)))
920 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
922 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
925 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
926 || GET_CODE (op) == CONST_VECTOR)
928 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
929 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
930 rtvec v = rtvec_alloc (n_elts);
933 if (GET_CODE (op) != CONST_VECTOR)
934 for (i = 0; i < n_elts; i++)
935 RTVEC_ELT (v, i) = op;
938 enum machine_mode inmode = GET_MODE (op);
939 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
940 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
942 gcc_assert (in_n_elts < n_elts);
943 gcc_assert ((n_elts % in_n_elts) == 0);
944 for (i = 0; i < n_elts; i++)
945 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
947 return gen_rtx_CONST_VECTOR (mode, v);
951 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
953 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
954 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
955 enum machine_mode opmode = GET_MODE (op);
956 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
957 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
958 rtvec v = rtvec_alloc (n_elts);
961 gcc_assert (op_n_elts == n_elts);
962 for (i = 0; i < n_elts; i++)
964 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
965 CONST_VECTOR_ELT (op, i),
966 GET_MODE_INNER (opmode));
969 RTVEC_ELT (v, i) = x;
971 return gen_rtx_CONST_VECTOR (mode, v);
974 /* The order of these tests is critical so that, for example, we don't
975 check the wrong mode (input vs. output) for a conversion operation,
976 such as FIX. At some point, this should be simplified. */
978 if (code == FLOAT && GET_MODE (op) == VOIDmode
979 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
981 HOST_WIDE_INT hv, lv;
984 if (GET_CODE (op) == CONST_INT)
985 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
987 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
989 REAL_VALUE_FROM_INT (d, lv, hv, mode);
990 d = real_value_truncate (mode, d);
991 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
993 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
994 && (GET_CODE (op) == CONST_DOUBLE
995 || GET_CODE (op) == CONST_INT))
997 HOST_WIDE_INT hv, lv;
1000 if (GET_CODE (op) == CONST_INT)
1001 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1003 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1005 if (op_mode == VOIDmode)
1007 /* We don't know how to interpret negative-looking numbers in
1008 this case, so don't try to fold those. */
1012 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1015 hv = 0, lv &= GET_MODE_MASK (op_mode);
1017 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1018 d = real_value_truncate (mode, d);
1019 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1022 if (GET_CODE (op) == CONST_INT
1023 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1025 HOST_WIDE_INT arg0 = INTVAL (op);
1039 val = (arg0 >= 0 ? arg0 : - arg0);
1043 /* Don't use ffs here. Instead, get low order bit and then its
1044 number. If arg0 is zero, this will return 0, as desired. */
1045 arg0 &= GET_MODE_MASK (mode);
1046 val = exact_log2 (arg0 & (- arg0)) + 1;
1050 arg0 &= GET_MODE_MASK (mode);
1051 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1054 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1058 arg0 &= GET_MODE_MASK (mode);
1061 /* Even if the value at zero is undefined, we have to come
1062 up with some replacement. Seems good enough. */
1063 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1064 val = GET_MODE_BITSIZE (mode);
1067 val = exact_log2 (arg0 & -arg0);
1071 arg0 &= GET_MODE_MASK (mode);
1074 val++, arg0 &= arg0 - 1;
1078 arg0 &= GET_MODE_MASK (mode);
1081 val++, arg0 &= arg0 - 1;
1090 for (s = 0; s < width; s += 8)
1092 unsigned int d = width - s - 8;
1093 unsigned HOST_WIDE_INT byte;
1094 byte = (arg0 >> s) & 0xff;
1105 /* When zero-extending a CONST_INT, we need to know its
1107 gcc_assert (op_mode != VOIDmode);
1108 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1110 /* If we were really extending the mode,
1111 we would have to distinguish between zero-extension
1112 and sign-extension. */
1113 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1116 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1117 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1123 if (op_mode == VOIDmode)
1125 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1127 /* If we were really extending the mode,
1128 we would have to distinguish between zero-extension
1129 and sign-extension. */
1130 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1133 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1136 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1138 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1139 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1147 case FLOAT_TRUNCATE:
1157 return gen_int_mode (val, mode);
1160 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1161 for a DImode operation on a CONST_INT. */
1162 else if (GET_MODE (op) == VOIDmode
1163 && width <= HOST_BITS_PER_WIDE_INT * 2
1164 && (GET_CODE (op) == CONST_DOUBLE
1165 || GET_CODE (op) == CONST_INT))
1167 unsigned HOST_WIDE_INT l1, lv;
1168 HOST_WIDE_INT h1, hv;
1170 if (GET_CODE (op) == CONST_DOUBLE)
1171 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1173 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1183 neg_double (l1, h1, &lv, &hv);
1188 neg_double (l1, h1, &lv, &hv);
1200 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1203 lv = exact_log2 (l1 & -l1) + 1;
1209 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1210 - HOST_BITS_PER_WIDE_INT;
1212 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1213 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1214 lv = GET_MODE_BITSIZE (mode);
1220 lv = exact_log2 (l1 & -l1);
1222 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1223 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1224 lv = GET_MODE_BITSIZE (mode);
1252 for (s = 0; s < width; s += 8)
1254 unsigned int d = width - s - 8;
1255 unsigned HOST_WIDE_INT byte;
1257 if (s < HOST_BITS_PER_WIDE_INT)
1258 byte = (l1 >> s) & 0xff;
1260 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1262 if (d < HOST_BITS_PER_WIDE_INT)
1265 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1271 /* This is just a change-of-mode, so do nothing. */
1276 gcc_assert (op_mode != VOIDmode);
1278 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1282 lv = l1 & GET_MODE_MASK (op_mode);
1286 if (op_mode == VOIDmode
1287 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1291 lv = l1 & GET_MODE_MASK (op_mode);
1292 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1293 && (lv & ((HOST_WIDE_INT) 1
1294 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1295 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1297 hv = HWI_SIGN_EXTEND (lv);
1308 return immed_double_const (lv, hv, mode);
1311 else if (GET_CODE (op) == CONST_DOUBLE
1312 && SCALAR_FLOAT_MODE_P (mode))
1314 REAL_VALUE_TYPE d, t;
1315 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1320 if (HONOR_SNANS (mode) && real_isnan (&d))
1322 real_sqrt (&t, mode, &d);
1326 d = REAL_VALUE_ABS (d);
1329 d = REAL_VALUE_NEGATE (d);
1331 case FLOAT_TRUNCATE:
1332 d = real_value_truncate (mode, d);
1335 /* All this does is change the mode. */
1338 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1345 real_to_target (tmp, &d, GET_MODE (op));
1346 for (i = 0; i < 4; i++)
1348 real_from_target (&d, tmp, mode);
1354 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1357 else if (GET_CODE (op) == CONST_DOUBLE
1358 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1359 && GET_MODE_CLASS (mode) == MODE_INT
1360 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1362 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1363 operators are intentionally left unspecified (to ease implementation
1364 by target backends), for consistency, this routine implements the
1365 same semantics for constant folding as used by the middle-end. */
1367 /* This was formerly used only for non-IEEE float.
1368 eggert@twinsun.com says it is safe for IEEE also. */
1369 HOST_WIDE_INT xh, xl, th, tl;
1370 REAL_VALUE_TYPE x, t;
1371 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1375 if (REAL_VALUE_ISNAN (x))
1378 /* Test against the signed upper bound. */
1379 if (width > HOST_BITS_PER_WIDE_INT)
1381 th = ((unsigned HOST_WIDE_INT) 1
1382 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1388 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1390 real_from_integer (&t, VOIDmode, tl, th, 0);
1391 if (REAL_VALUES_LESS (t, x))
1398 /* Test against the signed lower bound. */
1399 if (width > HOST_BITS_PER_WIDE_INT)
1401 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1407 tl = (HOST_WIDE_INT) -1 << (width - 1);
1409 real_from_integer (&t, VOIDmode, tl, th, 0);
1410 if (REAL_VALUES_LESS (x, t))
1416 REAL_VALUE_TO_INT (&xl, &xh, x);
1420 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1423 /* Test against the unsigned upper bound. */
1424 if (width == 2*HOST_BITS_PER_WIDE_INT)
1429 else if (width >= HOST_BITS_PER_WIDE_INT)
1431 th = ((unsigned HOST_WIDE_INT) 1
1432 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1438 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1440 real_from_integer (&t, VOIDmode, tl, th, 1);
1441 if (REAL_VALUES_LESS (t, x))
1448 REAL_VALUE_TO_INT (&xl, &xh, x);
1454 return immed_double_const (xl, xh, mode);
1460 /* Subroutine of simplify_binary_operation to simplify a commutative,
1461 associative binary operation CODE with result mode MODE, operating
1462 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1463 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1464 canonicalization is possible. */
1467 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
/* NOTE(review): this excerpt is elided — the rest of the parameter list
   (presumably "rtx op0, rtx op1" per the forward declaration in the file
   head), the local declaration of TEM, and the enclosing braces are not
   visible here.  Comments below describe only the visible lines.  */
/* Step 1: linearize nested applications of CODE so that the tree leans
   left — "(a op b) op c" rather than "a op (b op c)".  */
1472 /* Linearize the operator to the left. */
1473 if (GET_CODE (op1) == code)
1475 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1476 if (GET_CODE (op0) == code)
1478 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1479 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
/* Only rotate "a op (b op c)" to "(b op c) op a" when that does not
   fight the canonical commutative-operand ordering, or we could loop.  */
1482 /* "a op (b op c)" becomes "(b op c) op a". */
1483 if (! swap_commutative_operands_p (op1, op0))
1484 return simplify_gen_binary (code, mode, op1, op0);
/* Step 2: OP0 is already a nested application of CODE; try to pull a
   constant outward or fold an inner pair.  */
1491 if (GET_CODE (op0) == code)
1493 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1494 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1496 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1497 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
/* NOTE(review): the "if (tem)" guards that normally follow each
   simplify_binary_operation call appear to be elided from this excerpt;
   the returns below are presumably conditional on TEM being non-null —
   confirm against the full source.  */
1500 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1501 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1503 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1505 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1506 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1508 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1515 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1516 and OP1. Return 0 if no simplification is possible.
1518 Don't use this for relational operations such as EQ or LT.
1519 Use simplify_relational_operation instead. */
1521 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
/* NOTE(review): excerpt is elided — the return type line, the second
   parameter line (presumably "rtx op0, rtx op1"), the declaration of
   TEM, and the function braces are not visible here.  */
1524 rtx trueop0, trueop1;
/* Relational codes are rejected outright: folding them needs the
   operands' own mode, which this entry point does not receive.  */
1527 /* Relational operations don't work here. We must know the mode
1528 of the operands in order to do the comparison correctly.
1529 Assuming a full word can give incorrect results.
1530 Consider comparing 128 with -128 in QImode. */
1531 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1532 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
/* Canonical form for commutative operations puts the "heavier"
   operand (per swap_commutative_operands_p) first, so constants end
   up second; later code relies on this ordering.  */
1534 /* Make sure the constant is second. */
1535 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1536 && swap_commutative_operands_p (op0, op1))
1538 tem = op0, op0 = op1, op1 = tem;
/* TRUEOP0/TRUEOP1 see through constant-pool references so that
   constant folding can fire even when the operand is a pool MEM.  */
1541 trueop0 = avoid_constant_pool_reference (op0);
1542 trueop1 = avoid_constant_pool_reference (op1);
/* First try full constant folding; NOTE(review): the "if (tem)
   return tem;" guard between these two calls appears to be elided
   from this excerpt — confirm against the full source.  */
1544 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1547 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1550 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1551 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1552 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1553 actual constants. */
1556 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1557 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1559 rtx tem, reversed, opleft, opright;
1561 unsigned int width = GET_MODE_BITSIZE (mode);
1563 /* Even if we can't compute a constant result,
1564 there are some cases worth simplifying. */
1569 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1570 when x is NaN, infinite, or finite and nonzero. They aren't
1571 when x is -0 and the rounding mode is not towards -infinity,
1572 since (-0) + 0 is then 0. */
1573 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1576 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1577 transformations are safe even for IEEE. */
1578 if (GET_CODE (op0) == NEG)
1579 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1580 else if (GET_CODE (op1) == NEG)
1581 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1583 /* (~a) + 1 -> -a */
1584 if (INTEGRAL_MODE_P (mode)
1585 && GET_CODE (op0) == NOT
1586 && trueop1 == const1_rtx)
1587 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1589 /* Handle both-operands-constant cases. We can only add
1590 CONST_INTs to constants since the sum of relocatable symbols
1591 can't be handled by most assemblers. Don't add CONST_INT
1592 to CONST_INT since overflow won't be computed properly if wider
1593 than HOST_BITS_PER_WIDE_INT. */
1595 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1596 && GET_CODE (op1) == CONST_INT)
1597 return plus_constant (op0, INTVAL (op1));
1598 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1599 && GET_CODE (op0) == CONST_INT)
1600 return plus_constant (op1, INTVAL (op0));
1602 /* See if this is something like X * C - X or vice versa or
1603 if the multiplication is written as a shift. If so, we can
1604 distribute and make a new multiply, shift, or maybe just
1605 have X (if C is 2 in the example above). But don't make
1606 something more expensive than we had before. */
1608 if (SCALAR_INT_MODE_P (mode))
1610 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1611 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1612 rtx lhs = op0, rhs = op1;
1614 if (GET_CODE (lhs) == NEG)
1618 lhs = XEXP (lhs, 0);
1620 else if (GET_CODE (lhs) == MULT
1621 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1623 coeff0l = INTVAL (XEXP (lhs, 1));
1624 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1625 lhs = XEXP (lhs, 0);
1627 else if (GET_CODE (lhs) == ASHIFT
1628 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1629 && INTVAL (XEXP (lhs, 1)) >= 0
1630 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1632 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1634 lhs = XEXP (lhs, 0);
1637 if (GET_CODE (rhs) == NEG)
1641 rhs = XEXP (rhs, 0);
1643 else if (GET_CODE (rhs) == MULT
1644 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1646 coeff1l = INTVAL (XEXP (rhs, 1));
1647 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1648 rhs = XEXP (rhs, 0);
1650 else if (GET_CODE (rhs) == ASHIFT
1651 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1652 && INTVAL (XEXP (rhs, 1)) >= 0
1653 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1655 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1657 rhs = XEXP (rhs, 0);
1660 if (rtx_equal_p (lhs, rhs))
1662 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1664 unsigned HOST_WIDE_INT l;
1667 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1668 coeff = immed_double_const (l, h, mode);
1670 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1671 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1676 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1677 if ((GET_CODE (op1) == CONST_INT
1678 || GET_CODE (op1) == CONST_DOUBLE)
1679 && GET_CODE (op0) == XOR
1680 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1681 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1682 && mode_signbit_p (mode, op1))
1683 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1684 simplify_gen_binary (XOR, mode, op1,
1687 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1688 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1689 && GET_CODE (op0) == MULT
1690 && GET_CODE (XEXP (op0, 0)) == NEG)
1694 in1 = XEXP (XEXP (op0, 0), 0);
1695 in2 = XEXP (op0, 1);
1696 return simplify_gen_binary (MINUS, mode, op1,
1697 simplify_gen_binary (MULT, mode,
1701 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1702 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1704 if (COMPARISON_P (op0)
1705 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1706 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1707 && (reversed = reversed_comparison (op0, mode)))
1709 simplify_gen_unary (NEG, mode, reversed, mode);
1711 /* If one of the operands is a PLUS or a MINUS, see if we can
1712 simplify this by the associative law.
1713 Don't use the associative law for floating point.
1714 The inaccuracy makes it nonassociative,
1715 and subtle programs can break if operations are associated. */
1717 if (INTEGRAL_MODE_P (mode)
1718 && (plus_minus_operand_p (op0)
1719 || plus_minus_operand_p (op1))
1720 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1723 /* Reassociate floating point addition only when the user
1724 specifies unsafe math optimizations. */
1725 if (FLOAT_MODE_P (mode)
1726 && flag_unsafe_math_optimizations)
1728 tem = simplify_associative_operation (code, mode, op0, op1);
1736 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1737 using cc0, in which case we want to leave it as a COMPARE
1738 so we can distinguish it from a register-register-copy.
1740 In IEEE floating point, x-0 is not the same as x. */
1742 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1743 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1744 && trueop1 == CONST0_RTX (mode))
1748 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1749 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1750 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1751 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1753 rtx xop00 = XEXP (op0, 0);
1754 rtx xop10 = XEXP (op1, 0);
1757 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1759 if (REG_P (xop00) && REG_P (xop10)
1760 && GET_MODE (xop00) == GET_MODE (xop10)
1761 && REGNO (xop00) == REGNO (xop10)
1762 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1763 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1770 /* We can't assume x-x is 0 even with non-IEEE floating point,
1771 but since it is zero except in very strange circumstances, we
1772 will treat it as zero with -ffinite-math-only. */
1773 if (rtx_equal_p (trueop0, trueop1)
1774 && ! side_effects_p (op0)
1775 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1776 return CONST0_RTX (mode);
1778 /* Change subtraction from zero into negation. (0 - x) is the
1779 same as -x when x is NaN, infinite, or finite and nonzero.
1780 But if the mode has signed zeros, and does not round towards
1781 -infinity, then 0 - 0 is 0, not -0. */
1782 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1783 return simplify_gen_unary (NEG, mode, op1, mode);
1785 /* (-1 - a) is ~a. */
1786 if (trueop0 == constm1_rtx)
1787 return simplify_gen_unary (NOT, mode, op1, mode);
1789 /* Subtracting 0 has no effect unless the mode has signed zeros
1790 and supports rounding towards -infinity. In such a case,
1792 if (!(HONOR_SIGNED_ZEROS (mode)
1793 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1794 && trueop1 == CONST0_RTX (mode))
1797 /* See if this is something like X * C - X or vice versa or
1798 if the multiplication is written as a shift. If so, we can
1799 distribute and make a new multiply, shift, or maybe just
1800 have X (if C is 2 in the example above). But don't make
1801 something more expensive than we had before. */
1803 if (SCALAR_INT_MODE_P (mode))
1805 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1806 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1807 rtx lhs = op0, rhs = op1;
1809 if (GET_CODE (lhs) == NEG)
1813 lhs = XEXP (lhs, 0);
1815 else if (GET_CODE (lhs) == MULT
1816 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1818 coeff0l = INTVAL (XEXP (lhs, 1));
1819 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1820 lhs = XEXP (lhs, 0);
1822 else if (GET_CODE (lhs) == ASHIFT
1823 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1824 && INTVAL (XEXP (lhs, 1)) >= 0
1825 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1827 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1829 lhs = XEXP (lhs, 0);
1832 if (GET_CODE (rhs) == NEG)
1836 rhs = XEXP (rhs, 0);
1838 else if (GET_CODE (rhs) == MULT
1839 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1841 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1842 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1843 rhs = XEXP (rhs, 0);
1845 else if (GET_CODE (rhs) == ASHIFT
1846 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1847 && INTVAL (XEXP (rhs, 1)) >= 0
1848 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1850 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1852 rhs = XEXP (rhs, 0);
1855 if (rtx_equal_p (lhs, rhs))
1857 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1859 unsigned HOST_WIDE_INT l;
1862 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1863 coeff = immed_double_const (l, h, mode);
1865 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1866 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1871 /* (a - (-b)) -> (a + b). True even for IEEE. */
1872 if (GET_CODE (op1) == NEG)
1873 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1875 /* (-x - c) may be simplified as (-c - x). */
1876 if (GET_CODE (op0) == NEG
1877 && (GET_CODE (op1) == CONST_INT
1878 || GET_CODE (op1) == CONST_DOUBLE))
1880 tem = simplify_unary_operation (NEG, mode, op1, mode);
1882 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1885 /* Don't let a relocatable value get a negative coeff. */
1886 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1887 return simplify_gen_binary (PLUS, mode,
1889 neg_const_int (mode, op1));
1891 /* (x - (x & y)) -> (x & ~y) */
1892 if (GET_CODE (op1) == AND)
1894 if (rtx_equal_p (op0, XEXP (op1, 0)))
1896 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1897 GET_MODE (XEXP (op1, 1)));
1898 return simplify_gen_binary (AND, mode, op0, tem);
1900 if (rtx_equal_p (op0, XEXP (op1, 1)))
1902 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1903 GET_MODE (XEXP (op1, 0)));
1904 return simplify_gen_binary (AND, mode, op0, tem);
1908 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1909 by reversing the comparison code if valid. */
1910 if (STORE_FLAG_VALUE == 1
1911 && trueop0 == const1_rtx
1912 && COMPARISON_P (op1)
1913 && (reversed = reversed_comparison (op1, mode)))
1916 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1917 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1918 && GET_CODE (op1) == MULT
1919 && GET_CODE (XEXP (op1, 0)) == NEG)
1923 in1 = XEXP (XEXP (op1, 0), 0);
1924 in2 = XEXP (op1, 1);
1925 return simplify_gen_binary (PLUS, mode,
1926 simplify_gen_binary (MULT, mode,
1931 /* Canonicalize (minus (neg A) (mult B C)) to
1932 (minus (mult (neg B) C) A). */
1933 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1934 && GET_CODE (op1) == MULT
1935 && GET_CODE (op0) == NEG)
1939 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1940 in2 = XEXP (op1, 1);
1941 return simplify_gen_binary (MINUS, mode,
1942 simplify_gen_binary (MULT, mode,
1947 /* If one of the operands is a PLUS or a MINUS, see if we can
1948 simplify this by the associative law. This will, for example,
1949 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1950 Don't use the associative law for floating point.
1951 The inaccuracy makes it nonassociative,
1952 and subtle programs can break if operations are associated. */
1954 if (INTEGRAL_MODE_P (mode)
1955 && (plus_minus_operand_p (op0)
1956 || plus_minus_operand_p (op1))
1957 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1962 if (trueop1 == constm1_rtx)
1963 return simplify_gen_unary (NEG, mode, op0, mode);
1965 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1966 x is NaN, since x * 0 is then also NaN. Nor is it valid
1967 when the mode has signed zeros, since multiplying a negative
1968 number by 0 will give -0, not 0. */
1969 if (!HONOR_NANS (mode)
1970 && !HONOR_SIGNED_ZEROS (mode)
1971 && trueop1 == CONST0_RTX (mode)
1972 && ! side_effects_p (op0))
1975 /* In IEEE floating point, x*1 is not equivalent to x for
1977 if (!HONOR_SNANS (mode)
1978 && trueop1 == CONST1_RTX (mode))
1981 /* Convert multiply by constant power of two into shift unless
1982 we are still generating RTL. This test is a kludge. */
1983 if (GET_CODE (trueop1) == CONST_INT
1984 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1985 /* If the mode is larger than the host word size, and the
1986 uppermost bit is set, then this isn't a power of two due
1987 to implicit sign extension. */
1988 && (width <= HOST_BITS_PER_WIDE_INT
1989 || val != HOST_BITS_PER_WIDE_INT - 1))
1990 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1992 /* Likewise for multipliers wider than a word. */
1993 if (GET_CODE (trueop1) == CONST_DOUBLE
1994 && (GET_MODE (trueop1) == VOIDmode
1995 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1996 && GET_MODE (op0) == mode
1997 && CONST_DOUBLE_LOW (trueop1) == 0
1998 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1999 return simplify_gen_binary (ASHIFT, mode, op0,
2000 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2002 /* x*2 is x+x and x*(-1) is -x */
2003 if (GET_CODE (trueop1) == CONST_DOUBLE
2004 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2005 && GET_MODE (op0) == mode)
2008 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2010 if (REAL_VALUES_EQUAL (d, dconst2))
2011 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2013 if (!HONOR_SNANS (mode)
2014 && REAL_VALUES_EQUAL (d, dconstm1))
2015 return simplify_gen_unary (NEG, mode, op0, mode);
2018 /* Optimize -x * -x as x * x. */
2019 if (FLOAT_MODE_P (mode)
2020 && GET_CODE (op0) == NEG
2021 && GET_CODE (op1) == NEG
2022 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2023 && !side_effects_p (XEXP (op0, 0)))
2024 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2026 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2027 if (SCALAR_FLOAT_MODE_P (mode)
2028 && GET_CODE (op0) == ABS
2029 && GET_CODE (op1) == ABS
2030 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2031 && !side_effects_p (XEXP (op0, 0)))
2032 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2034 /* Reassociate multiplication, but for floating point MULTs
2035 only when the user specifies unsafe math optimizations. */
2036 if (! FLOAT_MODE_P (mode)
2037 || flag_unsafe_math_optimizations)
2039 tem = simplify_associative_operation (code, mode, op0, op1);
2046 if (trueop1 == const0_rtx)
2048 if (GET_CODE (trueop1) == CONST_INT
2049 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2050 == GET_MODE_MASK (mode)))
2052 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2054 /* A | (~A) -> -1 */
2055 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2056 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2057 && ! side_effects_p (op0)
2058 && SCALAR_INT_MODE_P (mode))
2061 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2062 if (GET_CODE (op1) == CONST_INT
2063 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2064 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2067 /* Canonicalize (X & C1) | C2. */
2068 if (GET_CODE (op0) == AND
2069 && GET_CODE (trueop1) == CONST_INT
2070 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2072 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2073 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2074 HOST_WIDE_INT c2 = INTVAL (trueop1);
2076 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2078 && !side_effects_p (XEXP (op0, 0)))
2081 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2082 if (((c1|c2) & mask) == mask)
2083 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2085 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2086 if (((c1 & ~c2) & mask) != (c1 & mask))
2088 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2089 gen_int_mode (c1 & ~c2, mode));
2090 return simplify_gen_binary (IOR, mode, tem, op1);
2094 /* Convert (A & B) | A to A. */
2095 if (GET_CODE (op0) == AND
2096 && (rtx_equal_p (XEXP (op0, 0), op1)
2097 || rtx_equal_p (XEXP (op0, 1), op1))
2098 && ! side_effects_p (XEXP (op0, 0))
2099 && ! side_effects_p (XEXP (op0, 1)))
2102 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2103 mode size to (rotate A CX). */
2105 if (GET_CODE (op1) == ASHIFT
2106 || GET_CODE (op1) == SUBREG)
2117 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2118 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2119 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2120 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2121 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2122 == GET_MODE_BITSIZE (mode)))
2123 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2125 /* Same, but for ashift that has been "simplified" to a wider mode
2126 by simplify_shift_const. */
2128 if (GET_CODE (opleft) == SUBREG
2129 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2130 && GET_CODE (opright) == LSHIFTRT
2131 && GET_CODE (XEXP (opright, 0)) == SUBREG
2132 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2133 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2134 && (GET_MODE_SIZE (GET_MODE (opleft))
2135 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2136 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2137 SUBREG_REG (XEXP (opright, 0)))
2138 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2139 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2140 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2141 == GET_MODE_BITSIZE (mode)))
2142 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2143 XEXP (SUBREG_REG (opleft), 1));
2145 /* If we have (ior (and (X C1) C2)), simplify this by making
2146 C1 as small as possible if C1 actually changes. */
2147 if (GET_CODE (op1) == CONST_INT
2148 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2149 || INTVAL (op1) > 0)
2150 && GET_CODE (op0) == AND
2151 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2152 && GET_CODE (op1) == CONST_INT
2153 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2154 return simplify_gen_binary (IOR, mode,
2156 (AND, mode, XEXP (op0, 0),
2157 GEN_INT (INTVAL (XEXP (op0, 1))
2161 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2162 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2163 the PLUS does not affect any of the bits in OP1: then we can do
2164 the IOR as a PLUS and we can associate. This is valid if OP1
2165 can be safely shifted left C bits. */
2166 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2167 && GET_CODE (XEXP (op0, 0)) == PLUS
2168 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2169 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2170 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2172 int count = INTVAL (XEXP (op0, 1));
2173 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2175 if (mask >> count == INTVAL (trueop1)
2176 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2177 return simplify_gen_binary (ASHIFTRT, mode,
2178 plus_constant (XEXP (op0, 0), mask),
2182 tem = simplify_associative_operation (code, mode, op0, op1);
2188 if (trueop1 == const0_rtx)
2190 if (GET_CODE (trueop1) == CONST_INT
2191 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2192 == GET_MODE_MASK (mode)))
2193 return simplify_gen_unary (NOT, mode, op0, mode);
2194 if (rtx_equal_p (trueop0, trueop1)
2195 && ! side_effects_p (op0)
2196 && GET_MODE_CLASS (mode) != MODE_CC)
2197 return CONST0_RTX (mode);
2199 /* Canonicalize XOR of the most significant bit to PLUS. */
2200 if ((GET_CODE (op1) == CONST_INT
2201 || GET_CODE (op1) == CONST_DOUBLE)
2202 && mode_signbit_p (mode, op1))
2203 return simplify_gen_binary (PLUS, mode, op0, op1);
2204 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2205 if ((GET_CODE (op1) == CONST_INT
2206 || GET_CODE (op1) == CONST_DOUBLE)
2207 && GET_CODE (op0) == PLUS
2208 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2209 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2210 && mode_signbit_p (mode, XEXP (op0, 1)))
2211 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2212 simplify_gen_binary (XOR, mode, op1,
2215 /* If we are XORing two things that have no bits in common,
2216 convert them into an IOR. This helps to detect rotation encoded
2217 using those methods and possibly other simplifications. */
2219 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2220 && (nonzero_bits (op0, mode)
2221 & nonzero_bits (op1, mode)) == 0)
2222 return (simplify_gen_binary (IOR, mode, op0, op1));
2224 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2225 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2228 int num_negated = 0;
2230 if (GET_CODE (op0) == NOT)
2231 num_negated++, op0 = XEXP (op0, 0);
2232 if (GET_CODE (op1) == NOT)
2233 num_negated++, op1 = XEXP (op1, 0);
2235 if (num_negated == 2)
2236 return simplify_gen_binary (XOR, mode, op0, op1);
2237 else if (num_negated == 1)
2238 return simplify_gen_unary (NOT, mode,
2239 simplify_gen_binary (XOR, mode, op0, op1),
2243 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2244 correspond to a machine insn or result in further simplifications
2245 if B is a constant. */
2247 if (GET_CODE (op0) == AND
2248 && rtx_equal_p (XEXP (op0, 1), op1)
2249 && ! side_effects_p (op1))
2250 return simplify_gen_binary (AND, mode,
2251 simplify_gen_unary (NOT, mode,
2252 XEXP (op0, 0), mode),
2255 else if (GET_CODE (op0) == AND
2256 && rtx_equal_p (XEXP (op0, 0), op1)
2257 && ! side_effects_p (op1))
2258 return simplify_gen_binary (AND, mode,
2259 simplify_gen_unary (NOT, mode,
2260 XEXP (op0, 1), mode),
2263 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2264 comparison if STORE_FLAG_VALUE is 1. */
2265 if (STORE_FLAG_VALUE == 1
2266 && trueop1 == const1_rtx
2267 && COMPARISON_P (op0)
2268 && (reversed = reversed_comparison (op0, mode)))
2271 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2272 is (lt foo (const_int 0)), so we can perform the above
2273 simplification if STORE_FLAG_VALUE is 1. */
2275 if (STORE_FLAG_VALUE == 1
2276 && trueop1 == const1_rtx
2277 && GET_CODE (op0) == LSHIFTRT
2278 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2279 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2280 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2282 /* (xor (comparison foo bar) (const_int sign-bit))
2283 when STORE_FLAG_VALUE is the sign bit. */
2284 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2285 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2286 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2287 && trueop1 == const_true_rtx
2288 && COMPARISON_P (op0)
2289 && (reversed = reversed_comparison (op0, mode)))
2292 tem = simplify_associative_operation (code, mode, op0, op1);
2298 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2300 /* If we are turning off bits already known off in OP0, we need
2302 if (GET_CODE (trueop1) == CONST_INT
2303 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2304 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2306 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2307 && GET_MODE_CLASS (mode) != MODE_CC)
2310 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2311 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2312 && ! side_effects_p (op0)
2313 && GET_MODE_CLASS (mode) != MODE_CC)
2314 return CONST0_RTX (mode);
2316 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2317 there are no nonzero bits of C outside of X's mode. */
2318 if ((GET_CODE (op0) == SIGN_EXTEND
2319 || GET_CODE (op0) == ZERO_EXTEND)
2320 && GET_CODE (trueop1) == CONST_INT
2321 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2322 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2323 & INTVAL (trueop1)) == 0)
2325 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2326 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2327 gen_int_mode (INTVAL (trueop1),
2329 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2332 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2333 if (GET_CODE (op0) == IOR
2334 && GET_CODE (trueop1) == CONST_INT
2335 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2337 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2338 return simplify_gen_binary (IOR, mode,
2339 simplify_gen_binary (AND, mode,
2340 XEXP (op0, 0), op1),
2341 gen_int_mode (tmp, mode));
2344 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2345 insn (and may simplify more). */
2346 if (GET_CODE (op0) == XOR
2347 && rtx_equal_p (XEXP (op0, 0), op1)
2348 && ! side_effects_p (op1))
2349 return simplify_gen_binary (AND, mode,
2350 simplify_gen_unary (NOT, mode,
2351 XEXP (op0, 1), mode),
2354 if (GET_CODE (op0) == XOR
2355 && rtx_equal_p (XEXP (op0, 1), op1)
2356 && ! side_effects_p (op1))
2357 return simplify_gen_binary (AND, mode,
2358 simplify_gen_unary (NOT, mode,
2359 XEXP (op0, 0), mode),
2362 /* Similarly for (~(A ^ B)) & A. */
2363 if (GET_CODE (op0) == NOT
2364 && GET_CODE (XEXP (op0, 0)) == XOR
2365 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2366 && ! side_effects_p (op1))
2367 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2369 if (GET_CODE (op0) == NOT
2370 && GET_CODE (XEXP (op0, 0)) == XOR
2371 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2372 && ! side_effects_p (op1))
2373 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2375 /* Convert (A | B) & A to A. */
2376 if (GET_CODE (op0) == IOR
2377 && (rtx_equal_p (XEXP (op0, 0), op1)
2378 || rtx_equal_p (XEXP (op0, 1), op1))
2379 && ! side_effects_p (XEXP (op0, 0))
2380 && ! side_effects_p (XEXP (op0, 1)))
2383 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2384 ((A & N) + B) & M -> (A + B) & M
2385 Similarly if (N & M) == 0,
2386 ((A | N) + B) & M -> (A + B) & M
2387 and for - instead of + and/or ^ instead of |. */
2388 if (GET_CODE (trueop1) == CONST_INT
2389 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2390 && ~INTVAL (trueop1)
2391 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2392 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2397 pmop[0] = XEXP (op0, 0);
2398 pmop[1] = XEXP (op0, 1);
2400 for (which = 0; which < 2; which++)
2403 switch (GET_CODE (tem))
2406 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2407 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2408 == INTVAL (trueop1))
2409 pmop[which] = XEXP (tem, 0);
2413 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2414 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2415 pmop[which] = XEXP (tem, 0);
2422 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2424 tem = simplify_gen_binary (GET_CODE (op0), mode,
2426 return simplify_gen_binary (code, mode, tem, op1);
2429 tem = simplify_associative_operation (code, mode, op0, op1);
2435 /* 0/x is 0 (or x&0 if x has side-effects). */
2436 if (trueop0 == CONST0_RTX (mode))
2438 if (side_effects_p (op1))
2439 return simplify_gen_binary (AND, mode, op1, trueop0);
2443 if (trueop1 == CONST1_RTX (mode))
2444 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2445 /* Convert divide by power of two into shift. */
2446 if (GET_CODE (trueop1) == CONST_INT
2447 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2448 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2452 /* Handle floating point and integers separately. */
2453 if (SCALAR_FLOAT_MODE_P (mode))
2455 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2456 safe for modes with NaNs, since 0.0 / 0.0 will then be
2457 NaN rather than 0.0. Nor is it safe for modes with signed
2458 zeros, since dividing 0 by a negative number gives -0.0 */
2459 if (trueop0 == CONST0_RTX (mode)
2460 && !HONOR_NANS (mode)
2461 && !HONOR_SIGNED_ZEROS (mode)
2462 && ! side_effects_p (op1))
2465 if (trueop1 == CONST1_RTX (mode)
2466 && !HONOR_SNANS (mode))
2469 if (GET_CODE (trueop1) == CONST_DOUBLE
2470 && trueop1 != CONST0_RTX (mode))
2473 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2476 if (REAL_VALUES_EQUAL (d, dconstm1)
2477 && !HONOR_SNANS (mode))
2478 return simplify_gen_unary (NEG, mode, op0, mode);
2480 /* Change FP division by a constant into multiplication.
2481 Only do this with -funsafe-math-optimizations. */
2482 if (flag_unsafe_math_optimizations
2483 && !REAL_VALUES_EQUAL (d, dconst0))
2485 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2486 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2487 return simplify_gen_binary (MULT, mode, op0, tem);
2493 /* 0/x is 0 (or x&0 if x has side-effects). */
2494 if (trueop0 == CONST0_RTX (mode))
2496 if (side_effects_p (op1))
2497 return simplify_gen_binary (AND, mode, op1, trueop0);
2501 if (trueop1 == CONST1_RTX (mode))
2502 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2504 if (trueop1 == constm1_rtx)
2506 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2507 return simplify_gen_unary (NEG, mode, x, mode);
2513 /* 0%x is 0 (or x&0 if x has side-effects). */
2514 if (trueop0 == CONST0_RTX (mode))
2516 if (side_effects_p (op1))
2517 return simplify_gen_binary (AND, mode, op1, trueop0);
2520 /* x%1 is 0 (or x&0 if x has side-effects). */
2521 if (trueop1 == CONST1_RTX (mode))
2523 if (side_effects_p (op0))
2524 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2525 return CONST0_RTX (mode);
2527 /* Implement modulus by power of two as AND. */
2528 if (GET_CODE (trueop1) == CONST_INT
2529 && exact_log2 (INTVAL (trueop1)) > 0)
2530 return simplify_gen_binary (AND, mode, op0,
2531 GEN_INT (INTVAL (op1) - 1));
2535 /* 0%x is 0 (or x&0 if x has side-effects). */
2536 if (trueop0 == CONST0_RTX (mode))
2538 if (side_effects_p (op1))
2539 return simplify_gen_binary (AND, mode, op1, trueop0);
2542 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2543 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2545 if (side_effects_p (op0))
2546 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2547 return CONST0_RTX (mode);
2554 if (trueop1 == CONST0_RTX (mode))
2556 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2558 /* Rotating ~0 always results in ~0. */
2559 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2560 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2561 && ! side_effects_p (op1))
2567 if (trueop1 == CONST0_RTX (mode))
2569 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2574 if (trueop1 == CONST0_RTX (mode))
2576 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2578 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2579 if (GET_CODE (op0) == CLZ
2580 && GET_CODE (trueop1) == CONST_INT
2581 && STORE_FLAG_VALUE == 1
2582 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2584 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2585 unsigned HOST_WIDE_INT zero_val = 0;
2587 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2588 && zero_val == GET_MODE_BITSIZE (imode)
2589 && INTVAL (trueop1) == exact_log2 (zero_val))
2590 return simplify_gen_relational (EQ, mode, imode,
2591 XEXP (op0, 0), const0_rtx);
2596 if (width <= HOST_BITS_PER_WIDE_INT
2597 && GET_CODE (trueop1) == CONST_INT
2598 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2599 && ! side_effects_p (op0))
2601 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2603 tem = simplify_associative_operation (code, mode, op0, op1);
2609 if (width <= HOST_BITS_PER_WIDE_INT
2610 && GET_CODE (trueop1) == CONST_INT
2611 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2612 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2613 && ! side_effects_p (op0))
2615 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2617 tem = simplify_associative_operation (code, mode, op0, op1);
2623 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2625 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2627 tem = simplify_associative_operation (code, mode, op0, op1);
2633 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2635 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2637 tem = simplify_associative_operation (code, mode, op0, op1);
2646 /* ??? There are simplifications that can be done. */
2650 if (!VECTOR_MODE_P (mode))
2652 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2653 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2654 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2655 gcc_assert (XVECLEN (trueop1, 0) == 1);
2656 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2658 if (GET_CODE (trueop0) == CONST_VECTOR)
2659 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2664 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2665 gcc_assert (GET_MODE_INNER (mode)
2666 == GET_MODE_INNER (GET_MODE (trueop0)));
2667 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2669 if (GET_CODE (trueop0) == CONST_VECTOR)
2671 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2672 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2673 rtvec v = rtvec_alloc (n_elts);
2676 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2677 for (i = 0; i < n_elts; i++)
2679 rtx x = XVECEXP (trueop1, 0, i);
2681 gcc_assert (GET_CODE (x) == CONST_INT);
2682 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2686 return gen_rtx_CONST_VECTOR (mode, v);
2690 if (XVECLEN (trueop1, 0) == 1
2691 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2692 && GET_CODE (trueop0) == VEC_CONCAT)
2695 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2697 /* Try to find the element in the VEC_CONCAT. */
2698 while (GET_MODE (vec) != mode
2699 && GET_CODE (vec) == VEC_CONCAT)
2701 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2702 if (offset < vec_size)
2703 vec = XEXP (vec, 0);
2707 vec = XEXP (vec, 1);
2709 vec = avoid_constant_pool_reference (vec);
2712 if (GET_MODE (vec) == mode)
2719 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2720 ? GET_MODE (trueop0)
2721 : GET_MODE_INNER (mode));
2722 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2723 ? GET_MODE (trueop1)
2724 : GET_MODE_INNER (mode));
2726 gcc_assert (VECTOR_MODE_P (mode));
2727 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2728 == GET_MODE_SIZE (mode));
2730 if (VECTOR_MODE_P (op0_mode))
2731 gcc_assert (GET_MODE_INNER (mode)
2732 == GET_MODE_INNER (op0_mode));
2734 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2736 if (VECTOR_MODE_P (op1_mode))
2737 gcc_assert (GET_MODE_INNER (mode)
2738 == GET_MODE_INNER (op1_mode));
2740 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2742 if ((GET_CODE (trueop0) == CONST_VECTOR
2743 || GET_CODE (trueop0) == CONST_INT
2744 || GET_CODE (trueop0) == CONST_DOUBLE)
2745 && (GET_CODE (trueop1) == CONST_VECTOR
2746 || GET_CODE (trueop1) == CONST_INT
2747 || GET_CODE (trueop1) == CONST_DOUBLE))
2749 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2750 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2751 rtvec v = rtvec_alloc (n_elts);
2753 unsigned in_n_elts = 1;
2755 if (VECTOR_MODE_P (op0_mode))
2756 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2757 for (i = 0; i < n_elts; i++)
2761 if (!VECTOR_MODE_P (op0_mode))
2762 RTVEC_ELT (v, i) = trueop0;
2764 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2768 if (!VECTOR_MODE_P (op1_mode))
2769 RTVEC_ELT (v, i) = trueop1;
2771 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2776 return gen_rtx_CONST_VECTOR (mode, v);
/* Fold the binary operation CODE applied to constant operands OP0 and
   OP1 in MODE, returning the resulting constant rtx or NULL if the
   operands cannot be folded.  Handles, in order: element-wise
   CONST_VECTOR arithmetic, VEC_CONCAT of constants, scalar floating
   point arithmetic, double-word (two HOST_WIDE_INT) integer
   arithmetic, and single-word CONST_INT arithmetic.
   NOTE(review): this listing is elided — the leading numbers are the
   original file's line numbers, and several braces, case labels and
   declarations fall in the gaps.  */
2789 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2792 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2794 unsigned int width = GET_MODE_BITSIZE (mode);
/* Vector op vector: fold element by element.  VEC_CONCAT is excluded
   because its operands are not element-wise pairs.  */
2796 if (VECTOR_MODE_P (mode)
2797 && code != VEC_CONCAT
2798 && GET_CODE (op0) == CONST_VECTOR
2799 && GET_CODE (op1) == CONST_VECTOR)
2801 unsigned n_elts = GET_MODE_NUNITS (mode);
2802 enum machine_mode op0mode = GET_MODE (op0);
2803 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2804 enum machine_mode op1mode = GET_MODE (op1);
2805 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2806 rtvec v = rtvec_alloc (n_elts);
2809 gcc_assert (op0_n_elts == n_elts);
2810 gcc_assert (op1_n_elts == n_elts);
2811 for (i = 0; i < n_elts; i++)
2813 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2814 CONST_VECTOR_ELT (op0, i),
2815 CONST_VECTOR_ELT (op1, i));
2818 RTVEC_ELT (v, i) = x;
2821 return gen_rtx_CONST_VECTOR (mode, v);
/* VEC_CONCAT of two constants: either two scalars making a 2-element
   vector, or two constant vectors spliced end to end.  */
2824 if (VECTOR_MODE_P (mode)
2825 && code == VEC_CONCAT
2826 && CONSTANT_P (op0) && CONSTANT_P (op1))
2828 unsigned n_elts = GET_MODE_NUNITS (mode);
2829 rtvec v = rtvec_alloc (n_elts);
2831 gcc_assert (n_elts >= 2);
2834 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2835 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2837 RTVEC_ELT (v, 0) = op0;
2838 RTVEC_ELT (v, 1) = op1;
2842 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2843 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2846 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2847 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2848 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2850 for (i = 0; i < op0_n_elts; ++i)
2851 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2852 for (i = 0; i < op1_n_elts; ++i)
2853 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2856 return gen_rtx_CONST_VECTOR (mode, v);
/* Scalar floating point: fold via the REAL_VALUE_TYPE software
   emulation, refusing any fold whose result could differ from the
   runtime result (traps, rounding-mode dependence, composite
   formats).  */
2859 if (SCALAR_FLOAT_MODE_P (mode)
2860 && GET_CODE (op0) == CONST_DOUBLE
2861 && GET_CODE (op1) == CONST_DOUBLE
2862 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2873 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2875 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2877 for (i = 0; i < 4; i++)
2894 real_from_target (&r, tmp0, mode);
2895 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2899 REAL_VALUE_TYPE f0, f1, value, result;
2902 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2903 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2904 real_convert (&f0, mode, &f0);
2905 real_convert (&f1, mode, &f1);
2907 if (HONOR_SNANS (mode)
2908 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2912 && REAL_VALUES_EQUAL (f1, dconst0)
2913 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2916 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2917 && flag_trapping_math
2918 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2920 int s0 = REAL_VALUE_NEGATIVE (f0);
2921 int s1 = REAL_VALUE_NEGATIVE (f1);
2926 /* Inf + -Inf = NaN plus exception. */
2931 /* Inf - Inf = NaN plus exception. */
2936 /* Inf / Inf = NaN plus exception. */
2943 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2944 && flag_trapping_math
2945 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2946 || (REAL_VALUE_ISINF (f1)
2947 && REAL_VALUES_EQUAL (f0, dconst0))))
2948 /* Inf * 0 = NaN plus exception. */
2951 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2953 real_convert (&result, mode, &value);
2955 /* Don't constant fold this floating point operation if
2956 the result has overflowed and flag_trapping_math. */
2958 if (flag_trapping_math
2959 && MODE_HAS_INFINITIES (mode)
2960 && REAL_VALUE_ISINF (result)
2961 && !REAL_VALUE_ISINF (f0)
2962 && !REAL_VALUE_ISINF (f1))
2963 /* Overflow plus exception. */
2966 /* Don't constant fold this floating point operation if the
2967 result may depend upon the run-time rounding mode and
2968 flag_rounding_math is set, or if GCC's software emulation
2969 is unable to accurately represent the result. */
2971 if ((flag_rounding_math
2972 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2973 && !flag_unsafe_math_optimizations))
2974 && (inexact || !real_identical (&result, &value)))
2977 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2981 /* We can fold some multi-word operations. */
2982 if (GET_MODE_CLASS (mode) == MODE_INT
2983 && width == HOST_BITS_PER_WIDE_INT * 2
2984 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2985 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2987 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2988 HOST_WIDE_INT h1, h2, hv, ht;
/* Split each operand into (low, high) word pairs, sign-extending a
   plain CONST_INT into the high word.  */
2990 if (GET_CODE (op0) == CONST_DOUBLE)
2991 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2993 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2995 if (GET_CODE (op1) == CONST_DOUBLE)
2996 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2998 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3003 /* A - B == A + (-B). */
3004 neg_double (l2, h2, &lv, &hv);
3007 /* Fall through.... */
3010 add_double (l1, h1, l2, h2, &lv, &hv);
3014 mul_double (l1, h1, l2, h2, &lv, &hv);
/* NOTE(review): the "&lt"/"&ht" arguments in the four calls below had
   been mangled to "<" by an HTML-entity pass; restored here.  LT/HT
   (declared above) receive the half of the division result — quotient
   or remainder — that the particular case does not need.  */
3018 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3019 &lv, &hv, &lt, &ht))
3024 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3025 &lt, &ht, &lv, &hv))
3030 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3031 &lv, &hv, &lt, &ht))
3036 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3037 &lt, &ht, &lv, &hv))
3042 lv = l1 & l2, hv = h1 & h2;
3046 lv = l1 | l2, hv = h1 | h2;
3050 lv = l1 ^ l2, hv = h1 ^ h2;
/* Double-word MIN/MAX: compare high words (signed or unsigned per
   case), falling back to the unsigned low words on a tie.  */
3056 && ((unsigned HOST_WIDE_INT) l1
3057 < (unsigned HOST_WIDE_INT) l2)))
3066 && ((unsigned HOST_WIDE_INT) l1
3067 > (unsigned HOST_WIDE_INT) l2)))
3074 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3076 && ((unsigned HOST_WIDE_INT) l1
3077 < (unsigned HOST_WIDE_INT) l2)))
3084 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3086 && ((unsigned HOST_WIDE_INT) l1
3087 > (unsigned HOST_WIDE_INT) l2)))
3093 case LSHIFTRT: case ASHIFTRT:
3095 case ROTATE: case ROTATERT:
3096 if (SHIFT_COUNT_TRUNCATED)
3097 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3099 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3102 if (code == LSHIFTRT || code == ASHIFTRT)
3103 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3105 else if (code == ASHIFT)
3106 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3107 else if (code == ROTATE)
3108 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3109 else /* code == ROTATERT */
3110 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3117 return immed_double_const (lv, hv, mode);
/* Single-word CONST_INT op CONST_INT.  */
3120 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3121 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3123 /* Get the integer argument values in two forms:
3124 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3126 arg0 = INTVAL (op0);
3127 arg1 = INTVAL (op1);
3129 if (width < HOST_BITS_PER_WIDE_INT)
3131 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3132 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3135 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3136 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3139 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3140 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3148 /* Compute the value of the arithmetic. */
3153 val = arg0s + arg1s;
3157 val = arg0s - arg1s;
3161 val = arg0s * arg1s;
/* The divisions below decline to fold when the divisor is zero or the
   quotient would overflow (most-negative value divided by -1).  */
3166 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3169 val = arg0s / arg1s;
3174 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3177 val = arg0s % arg1s;
3182 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3185 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3190 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3193 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3211 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3212 the value is in range. We can't return any old value for
3213 out-of-range arguments because either the middle-end (via
3214 shift_truncation_mask) or the back-end might be relying on
3215 target-specific knowledge. Nor can we rely on
3216 shift_truncation_mask, since the shift might not be part of an
3217 ashlM3, lshrM3 or ashrM3 instruction. */
3218 if (SHIFT_COUNT_TRUNCATED)
3219 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3220 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3223 val = (code == ASHIFT
3224 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3225 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3227 /* Sign-extend the result for arithmetic right shifts. */
3228 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3229 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3237 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3238 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3246 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3247 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3251 /* Do nothing here. */
3255 val = arg0s <= arg1s ? arg0s : arg1s;
3259 val = ((unsigned HOST_WIDE_INT) arg0
3260 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3264 val = arg0s > arg1s ? arg0s : arg1s;
3268 val = ((unsigned HOST_WIDE_INT) arg0
3269 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3277 /* ??? There are simplifications that can be done. */
3284 return gen_int_mode (val, mode);
3292 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3295 Rather than test for specific case, we do this by a brute-force method
3296 and do all possible simplifications until no more changes occur. Then
3297 we rebuild the operation. */
/* Per-operand record used by simplify_plus_minus: holds an operand
   rtx together with a flag recording whether it is negated.
   (The struct body is elided in this listing.)  */
3299 struct simplify_plus_minus_op_data
/* Ordering predicate for sorting simplify_plus_minus operands.
   Primary key is commutative_operand_precedence (note Y's precedence
   is subtracted from X's, so higher-precedence operands sort toward
   the front); as a tie-breaker, equal REGs are grouped by REGNO so
   duplicate registers become adjacent and can cancel or combine.
   NOTE(review): intervening lines are elided, so the exact return
   convention on the precedence path is not visible here.  */
3306 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3310 result = (commutative_operand_precedence (y)
3311 - commutative_operand_precedence (x));
3315 /* Group together equal REGs to do more simplification. */
3316 if (REG_P (x) && REG_P (y))
3317 return REGNO (x) > REGNO (y);
/* Flatten a tree of PLUS/MINUS/NEG/NOT (and CONST-wrapped constant
   sums) rooted at OP0 CODE OP1 into a flat array of up to 8 signed
   operands, cancel and combine pairs until nothing changes, then
   rebuild a canonical PLUS/MINUS chain in MODE.
   NOTE(review): this listing is elided (leading numbers are the
   original file's line numbers); braces, some case labels and the
   early-exit paths fall in the gaps.  */
3323 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3326 struct simplify_plus_minus_op_data ops[8];
3328 int n_ops = 2, input_ops = 2;
3329 int changed, n_constants = 0, canonicalized = 0;
3332 memset (ops, 0, sizeof ops);
3334 /* Set up the two operands and then expand them until nothing has been
3335 changed. If we run out of room in our array, give up; this should
3336 almost never happen. */
3341 ops[1].neg = (code == MINUS);
/* Expansion pass: each PLUS/MINUS operand is split into its two
   children, with the negation flag propagated (and flipped across a
   MINUS's second operand).  */
3347 for (i = 0; i < n_ops; i++)
3349 rtx this_op = ops[i].op;
3350 int this_neg = ops[i].neg;
3351 enum rtx_code this_code = GET_CODE (this_op);
3360 ops[n_ops].op = XEXP (this_op, 1);
3361 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3364 ops[i].op = XEXP (this_op, 0);
3367 canonicalized |= this_neg;
/* A NEG operand is unwrapped by flipping its negation flag.  */
3371 ops[i].op = XEXP (this_op, 0);
3372 ops[i].neg = ! this_neg;
/* (const (plus constant constant)) is split into its two constant
   parts.  */
3379 && GET_CODE (XEXP (this_op, 0)) == PLUS
3380 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3381 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3383 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3384 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3385 ops[n_ops].neg = this_neg;
3393 /* ~a -> (-a - 1) */
3396 ops[n_ops].op = constm1_rtx;
3397 ops[n_ops++].neg = this_neg;
3398 ops[i].op = XEXP (this_op, 0);
3399 ops[i].neg = !this_neg;
/* A negated CONST_INT is folded into a positive entry up front.  */
3409 ops[i].op = neg_const_int (mode, this_op);
3423 if (n_constants > 1)
3426 gcc_assert (n_ops >= 2);
3428 /* If we only have two operands, we can avoid the loops. */
3431 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3434 /* Get the two operands. Be careful with the order, especially for
3435 the cases where code == MINUS. */
3436 if (ops[0].neg && ops[1].neg)
3438 lhs = gen_rtx_NEG (mode, ops[0].op);
3441 else if (ops[0].neg)
3452 return simplify_const_binary_operation (code, mode, lhs, rhs);
3455 /* Now simplify each pair of operands until nothing changes. */
3458 /* Insertion sort is good enough for an eight-element array. */
3459 for (i = 1; i < n_ops; i++)
3461 struct simplify_plus_minus_op_data save;
3463 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3469 ops[j + 1] = ops[j];
3470 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3474 /* This is only useful the first time through. */
/* Combination pass: try to fold every pair of live entries; a
   successful fold replaces one entry and clears the other.  */
3479 for (i = n_ops - 1; i > 0; i--)
3480 for (j = i - 1; j >= 0; j--)
3482 rtx lhs = ops[j].op, rhs = ops[i].op;
3483 int lneg = ops[j].neg, rneg = ops[i].neg;
3485 if (lhs != 0 && rhs != 0)
3487 enum rtx_code ncode = PLUS;
3493 tem = lhs, lhs = rhs, rhs = tem;
3495 else if (swap_commutative_operands_p (lhs, rhs))
3496 tem = lhs, lhs = rhs, rhs = tem;
/* Constant pairs are folded with any CONST wrapper stripped first,
   then re-wrapped if the fold result is not itself constant.  */
3498 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3499 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3501 rtx tem_lhs, tem_rhs;
3503 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3504 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3505 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3507 if (tem && !CONSTANT_P (tem))
3508 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3511 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3513 /* Reject "simplifications" that just wrap the two
3514 arguments in a CONST. Failure to do so can result
3515 in infinite recursion with simplify_binary_operation
3516 when it calls us to simplify CONST operations. */
3518 && ! (GET_CODE (tem) == CONST
3519 && GET_CODE (XEXP (tem, 0)) == ncode
3520 && XEXP (XEXP (tem, 0), 0) == lhs
3521 && XEXP (XEXP (tem, 0), 1) == rhs))
3524 if (GET_CODE (tem) == NEG)
3525 tem = XEXP (tem, 0), lneg = !lneg;
3526 if (GET_CODE (tem) == CONST_INT && lneg)
3527 tem = neg_const_int (mode, tem), lneg = 0;
3531 ops[j].op = NULL_RTX;
3537 /* Pack all the operands to the lower-numbered entries. */
3538 for (i = 0, j = 0; j < n_ops; j++)
3548 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3550 && GET_CODE (ops[1].op) == CONST_INT
3551 && CONSTANT_P (ops[0].op)
3553 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3555 /* We suppressed creation of trivial CONST expressions in the
3556 combination loop to avoid recursion. Create one manually now.
3557 The combination loop should have ensured that there is exactly
3558 one CONST_INT, and the sort will have ensured that it is last
3559 in the array and that any other constant will be next-to-last. */
3562 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3563 && CONSTANT_P (ops[n_ops - 2].op))
3565 rtx value = ops[n_ops - 1].op;
3566 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3567 value = neg_const_int (mode, value);
3568 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3572 /* Put a non-negated operand first, if possible. */
3574 for (i = 0; i < n_ops && ops[i].neg; i++)
3577 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3586 /* Now make the result by performing the requested operations. */
3588 for (i = 1; i < n_ops; i++)
3589 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3590 mode, result, ops[i].op);
3595 /* Check whether an operand is suitable for calling simplify_plus_minus. */
/* True for the three shapes simplify_plus_minus can flatten:
   (plus ...), (minus ...), and (const (plus constant constant)).  */
3597 plus_minus_operand_p (const_rtx x)
3599 return GET_CODE (x) == PLUS
3600 || GET_CODE (x) == MINUS
3601 || (GET_CODE (x) == CONST
3602 && GET_CODE (XEXP (x, 0)) == PLUS
3603 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3604 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3607 /* Like simplify_binary_operation except used for relational operators.
3608 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3609 not also be VOIDmode.
3611 CMP_MODE specifies in which mode the comparison is done in, so it is
3612 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3613 the operands or, if both are VOIDmode, the operands are compared in
3614 "infinite precision". */
/* NOTE(review): this listing is elided (leading numbers are the
   original file's line numbers); braces and some statements fall in
   the gaps.  */
3616 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3617 enum machine_mode cmp_mode, rtx op0, rtx op1)
3619 rtx tem, trueop0, trueop1;
/* Derive CMP_MODE from whichever operand has a mode.  */
3621 if (cmp_mode == VOIDmode)
3622 cmp_mode = GET_MODE (op0);
3623 if (cmp_mode == VOIDmode)
3624 cmp_mode = GET_MODE (op1);
/* First try to fold the comparison to a constant; if that succeeds,
   map the 0/1 result into MODE's representation of false/true.  */
3626 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3629 if (SCALAR_FLOAT_MODE_P (mode))
3631 if (tem == const0_rtx)
3632 return CONST0_RTX (mode);
3633 #ifdef FLOAT_STORE_FLAG_VALUE
3635 REAL_VALUE_TYPE val;
3636 val = FLOAT_STORE_FLAG_VALUE (mode);
3637 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3643 if (VECTOR_MODE_P (mode))
3645 if (tem == const0_rtx)
3646 return CONST0_RTX (mode);
3647 #ifdef VECTOR_STORE_FLAG_VALUE
/* A "true" vector result replicates the target's per-element flag
   value across all elements.  */
3652 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3653 if (val == NULL_RTX)
3655 if (val == const1_rtx)
3656 return CONST1_RTX (mode);
3658 units = GET_MODE_NUNITS (mode);
3659 v = rtvec_alloc (units);
3660 for (i = 0; i < units; i++)
3661 RTVEC_ELT (v, i) = val;
3662 return gen_rtx_raw_CONST_VECTOR (mode, v);
3672 /* For the following tests, ensure const0_rtx is op1. */
3673 if (swap_commutative_operands_p (op0, op1)
3674 || (op0 == const0_rtx && op1 != const0_rtx))
3675 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3677 /* If op0 is a compare, extract the comparison arguments from it. */
3678 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3679 return simplify_relational_operation (code, mode, VOIDmode,
3680 XEXP (op0, 0), XEXP (op0, 1));
/* Delegate non-MODE_CC (real) comparisons to the _1 helper, using the
   constant-pool-resolved operands for its tests.  */
3682 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3686 trueop0 = avoid_constant_pool_reference (op0);
3687 trueop1 = avoid_constant_pool_reference (op1);
3688 return simplify_relational_operation_1 (code, mode, cmp_mode,
3692 /* This part of simplify_relational_operation is only used when CMP_MODE
3693 is not in class MODE_CC (i.e. it is a real comparison).
3695 MODE is the mode of the result, while CMP_MODE specifies in which
3696 mode the comparison is done in, so it is the mode of the operands. */
/* NOTE(review): this listing is elided (leading numbers are the
   original file's line numbers); some conditions of the ifs below
   fall in the gaps.  */
3699 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3700 enum machine_mode cmp_mode, rtx op0, rtx op1)
3702 enum rtx_code op0code = GET_CODE (op0);
/* A comparison-of-a-comparison against zero collapses to the inner
   comparison (or its reverse, for the EQ case).  */
3704 if (op1 == const0_rtx && COMPARISON_P (op0))
3706 /* If op0 is a comparison, extract the comparison arguments
3710 if (GET_MODE (op0) == mode)
3711 return simplify_rtx (op0);
3713 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3714 XEXP (op0, 0), XEXP (op0, 1));
3716 else if (code == EQ)
3718 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3719 if (new_code != UNKNOWN)
3720 return simplify_gen_relational (new_code, mode, VOIDmode,
3721 XEXP (op0, 0), XEXP (op0, 1));
/* Canonicalizations of comparisons against the constants 0, 1
   and -1.  */
3725 if (op1 == const0_rtx)
3727 /* Canonicalize (GTU x 0) as (NE x 0). */
3729 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3730 /* Canonicalize (LEU x 0) as (EQ x 0). */
3732 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3734 else if (op1 == const1_rtx)
3739 /* Canonicalize (GE x 1) as (GT x 0). */
3740 return simplify_gen_relational (GT, mode, cmp_mode,
3743 /* Canonicalize (GEU x 1) as (NE x 0). */
3744 return simplify_gen_relational (NE, mode, cmp_mode,
3747 /* Canonicalize (LT x 1) as (LE x 0). */
3748 return simplify_gen_relational (LE, mode, cmp_mode,
3751 /* Canonicalize (LTU x 1) as (EQ x 0). */
3752 return simplify_gen_relational (EQ, mode, cmp_mode,
3758 else if (op1 == constm1_rtx)
3760 /* Canonicalize (LE x -1) as (LT x 0). */
3762 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3763 /* Canonicalize (GT x -1) as (GE x 0). */
3765 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3768 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3769 if ((code == EQ || code == NE)
3770 && (op0code == PLUS || op0code == MINUS)
3772 && CONSTANT_P (XEXP (op0, 1))
3773 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations)
3775 rtx x = XEXP (op0, 0);
3776 rtx c = XEXP (op0, 1);
3778 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3780 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3783 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3784 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3786 && op1 == const0_rtx
3787 && GET_MODE_CLASS (mode) == MODE_INT
3788 && cmp_mode != VOIDmode
3789 /* ??? Work-around BImode bugs in the ia64 backend. */
3791 && cmp_mode != BImode
3792 && nonzero_bits (op0, cmp_mode) == 1
3793 && STORE_FLAG_VALUE == 1)
3794 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3795 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3796 : lowpart_subreg (mode, op0, cmp_mode);
3798 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3799 if ((code == EQ || code == NE)
3800 && op1 == const0_rtx
3802 return simplify_gen_relational (code, mode, cmp_mode,
3803 XEXP (op0, 0), XEXP (op0, 1));
3805 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3806 if ((code == EQ || code == NE)
3808 && rtx_equal_p (XEXP (op0, 0), op1)
3809 && !side_effects_p (XEXP (op0, 0)))
3810 return simplify_gen_relational (code, mode, cmp_mode,
3811 XEXP (op0, 1), const0_rtx);
3813 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3814 if ((code == EQ || code == NE)
3816 && rtx_equal_p (XEXP (op0, 1), op1)
3817 && !side_effects_p (XEXP (op0, 1)))
3818 return simplify_gen_relational (code, mode, cmp_mode,
3819 XEXP (op0, 0), const0_rtx);
3821 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3822 if ((code == EQ || code == NE)
3824 && (GET_CODE (op1) == CONST_INT
3825 || GET_CODE (op1) == CONST_DOUBLE)
3826 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3827 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3828 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3829 simplify_gen_binary (XOR, cmp_mode,
3830 XEXP (op0, 1), op1));
/* A POPCOUNT result is never negative, so comparisons against zero
   reduce to EQ/NE of the popcount argument itself.  */
3832 if (op0code == POPCOUNT && op1 == const0_rtx)
3838 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3839 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3840 XEXP (op0, 0), const0_rtx);
3845 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3846 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3847 XEXP (op0, 0), const0_rtx);
3856 /* Check if the given comparison (done in the given MODE) is actually a
3857 tautology or a contradiction.
3858 If no simplification is possible, this function returns zero.
3859 Otherwise, it returns either const_true_rtx or const0_rtx. */
/* CODE is the comparison rtx code (EQ, NE, LT, GT, LTU, ...); MODE is the
   mode the comparison is performed in; OP0/OP1 are the operands.  MODE may
   be VOIDmode only when both operands are mode-less (asserted below).
   NOTE(review): this listing elides some source lines (the embedded line
   numbering skips), so the code is kept byte-identical and only commented.  */
3862 simplify_const_relational_operation (enum rtx_code code,
3863 enum machine_mode mode,
/* Flags computed below and consumed by the final dispatch on CODE:
   EQUAL, signed less-than in each direction (op0lt/op1lt) and the
   unsigned counterparts (op0ltu/op1ltu).  */
3866 int equal, op0lt, op0ltu, op1lt, op1ltu;
3871 gcc_assert (mode != VOIDmode
3872 || (GET_MODE (op0) == VOIDmode
3873 && GET_MODE (op1) == VOIDmode));
3875 /* If op0 is a compare, extract the comparison arguments from it. */
3876 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3878 op1 = XEXP (op0, 1);
3879 op0 = XEXP (op0, 0);
/* Recover the comparison mode from whichever extracted operand has one.  */
3881 if (GET_MODE (op0) != VOIDmode)
3882 mode = GET_MODE (op0);
3883 else if (GET_MODE (op1) != VOIDmode)
3884 mode = GET_MODE (op1);
3889 /* We can't simplify MODE_CC values since we don't know what the
3890 actual comparison is. */
3891 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3894 /* Make sure the constant is second. */
3895 if (swap_commutative_operands_p (op0, op1))
3897 tem = op0, op0 = op1, op1 = tem;
3898 code = swap_condition (code);
/* Look through constant-pool references so constants loaded from memory
   can still be folded.  */
3901 trueop0 = avoid_constant_pool_reference (op0);
3902 trueop1 = avoid_constant_pool_reference (op1);
3904 /* For integer comparisons of A and B maybe we can simplify A - B and can
3905 then simplify a comparison of that with zero. If A and B are both either
3906 a register or a CONST_INT, this can't help; testing for these cases will
3907 prevent infinite recursion here and speed things up.
3909 We can only do this for EQ and NE comparisons as otherwise we may
3910 lose or introduce overflow which we cannot disregard as undefined as
3911 we do not know the signedness of the operation on either the left or
3912 the right hand side of the comparison. */
3914 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3915 && (code == EQ || code == NE)
3916 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3917 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3918 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3919 /* We cannot do this if tem is a nonzero address. */
3920 && ! nonzero_address_p (tem))
3921 return simplify_const_relational_operation (signed_condition (code),
3922 mode, tem, const0_rtx);
/* Without NaNs, ORDERED is trivially true and UNORDERED trivially false.  */
3924 if (! HONOR_NANS (mode) && code == ORDERED)
3925 return const_true_rtx;
3927 if (! HONOR_NANS (mode) && code == UNORDERED)
3930 /* For modes without NaNs, if the two operands are equal, we know the
3931 result except if they have side-effects. */
3932 if (! HONOR_NANS (GET_MODE (trueop0))
3933 && rtx_equal_p (trueop0, trueop1)
3934 && ! side_effects_p (trueop0))
3935 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3937 /* If the operands are floating-point constants, see if we can fold
3939 else if (GET_CODE (trueop0) == CONST_DOUBLE
3940 && GET_CODE (trueop1) == CONST_DOUBLE
3941 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3943 REAL_VALUE_TYPE d0, d1;
3945 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3946 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3948 /* Comparisons are unordered iff at least one of the values is NaN. */
3949 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3959 return const_true_rtx;
3972 equal = REAL_VALUES_EQUAL (d0, d1);
3973 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3974 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3977 /* Otherwise, see if the operands are both integers. */
3978 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3979 && (GET_CODE (trueop0) == CONST_DOUBLE
3980 || GET_CODE (trueop0) == CONST_INT)
3981 && (GET_CODE (trueop1) == CONST_DOUBLE
3982 || GET_CODE (trueop1) == CONST_INT))
3984 int width = GET_MODE_BITSIZE (mode);
/* (low, high) word pairs for each constant, kept in both signed and
   unsigned forms so all comparison senses can be decided at once.  */
3985 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3986 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3988 /* Get the two words comprising each integer constant. */
3989 if (GET_CODE (trueop0) == CONST_DOUBLE)
3991 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3992 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3996 l0u = l0s = INTVAL (trueop0);
3997 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4000 if (GET_CODE (trueop1) == CONST_DOUBLE)
4002 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4003 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4007 l1u = l1s = INTVAL (trueop1);
4008 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4011 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4012 we have to sign or zero-extend the values. */
4013 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
/* Zero-extend the unsigned copies, sign-extend the signed ones.  */
4015 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4016 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4018 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4019 l0s |= ((HOST_WIDE_INT) (-1) << width);
4021 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4022 l1s |= ((HOST_WIDE_INT) (-1) << width);
/* For modes no wider than a host word, the high words are fully
   determined by the sign of the extended low words.  */
4024 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4025 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
/* Double-word comparisons: high words decide; equal high words fall
   back to an unsigned comparison of the low words.  */
4027 equal = (h0u == h1u && l0u == l1u);
4028 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4029 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4030 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4031 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4034 /* Otherwise, there are some code-specific tests we can make. */
4037 /* Optimize comparisons with upper and lower bounds. */
4038 if (SCALAR_INT_MODE_P (mode)
4039 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
/* MMIN/MMAX are the mode's extreme representable values for the
   comparison's signedness (elided setup above — TODO confirm
   against the full source).  */
4052 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4059 /* x >= min is always true. */
4060 if (rtx_equal_p (trueop1, mmin))
4061 tem = const_true_rtx;
4067 /* x <= max is always true. */
4068 if (rtx_equal_p (trueop1, mmax))
4069 tem = const_true_rtx;
4074 /* x > max is always false. */
4075 if (rtx_equal_p (trueop1, mmax))
4081 /* x < min is always false. */
4082 if (rtx_equal_p (trueop1, mmin))
4089 if (tem == const0_rtx
4090 || tem == const_true_rtx)
/* A provably nonzero address can never compare equal to zero.  */
4097 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4102 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4103 return const_true_rtx;
4107 /* Optimize abs(x) < 0.0. */
4108 if (trueop1 == CONST0_RTX (mode)
4109 && !HONOR_SNANS (mode)
4110 && (!INTEGRAL_MODE_P (mode)
4111 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4113 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4115 if (GET_CODE (tem) == ABS)
/* For integer modes this folding relies on signed overflow being
   undefined (abs (INT_MIN) would otherwise be negative), so emit
   the -Wstrict-overflow warning when requested.  */
4117 if (INTEGRAL_MODE_P (mode)
4118 && (issue_strict_overflow_warning
4119 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4120 warning (OPT_Wstrict_overflow,
4121 ("assuming signed overflow does not occur when "
4122 "assuming abs (x) < 0 is false"))
4127 /* Optimize popcount (x) < 0. */
4128 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4129 return const_true_rtx;
4133 /* Optimize abs(x) >= 0.0. */
4134 if (trueop1 == CONST0_RTX (mode)
4135 && !HONOR_NANS (mode)
4136 && (!INTEGRAL_MODE_P (mode)
4137 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4139 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4141 if (GET_CODE (tem) == ABS)
4143 if (INTEGRAL_MODE_P (mode)
4144 && (issue_strict_overflow_warning
4145 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4146 warning (OPT_Wstrict_overflow,
4147 ("assuming signed overflow does not occur when "
4148 "assuming abs (x) >= 0 is true"));
4149 return const_true_rtx;
4153 /* Optimize popcount (x) >= 0. */
4154 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4155 return const_true_rtx;
4159 /* Optimize ! (abs(x) < 0.0). */
4160 if (trueop1 == CONST0_RTX (mode))
4162 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4164 if (GET_CODE (tem) == ABS)
4165 return const_true_rtx;
4176 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
/* Final dispatch: fold CODE to a constant using the flags computed
   above (presumably a switch on CODE — the case labels are elided
   in this listing).  */
4182 return equal ? const_true_rtx : const0_rtx;
4185 return ! equal ? const_true_rtx : const0_rtx;
4188 return op0lt ? const_true_rtx : const0_rtx;
4191 return op1lt ? const_true_rtx : const0_rtx;
4193 return op0ltu ? const_true_rtx : const0_rtx;
4195 return op1ltu ? const_true_rtx : const0_rtx;
4198 return equal || op0lt ? const_true_rtx : const0_rtx;
4201 return equal || op1lt ? const_true_rtx : const0_rtx;
4203 return equal || op0ltu ? const_true_rtx : const0_rtx;
4205 return equal || op1ltu ? const_true_rtx : const0_rtx;
4207 return const_true_rtx;
4215 /* Simplify CODE, an operation with result mode MODE and three operands,
4216 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4217 a constant. Return 0 if no simplifications is possible. */
/* Handles SIGN_EXTRACT/ZERO_EXTRACT on constants, IF_THEN_ELSE folding,
   and constant VEC_MERGE (visible below).  NOTE(review): the switch's
   case labels are elided in this listing; code kept byte-identical.  */
4220 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4221 enum machine_mode op0_mode, rtx op0, rtx op1,
4224 unsigned int width = GET_MODE_BITSIZE (mode);
4226 /* VOIDmode means "infinite" precision. */
4228 width = HOST_BITS_PER_WIDE_INT;
4234 if (GET_CODE (op0) == CONST_INT
4235 && GET_CODE (op1) == CONST_INT
4236 && GET_CODE (op2) == CONST_INT
4237 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4238 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4240 /* Extracting a bit-field from a constant */
/* OP1 is the field width in bits, OP2 the bit position; BITS_BIG_ENDIAN
   determines whether the position counts from the MSB or LSB.  */
4241 HOST_WIDE_INT val = INTVAL (op0);
4243 if (BITS_BIG_ENDIAN)
4244 val >>= (GET_MODE_BITSIZE (op0_mode)
4245 - INTVAL (op2) - INTVAL (op1));
4247 val >>= INTVAL (op2);
4249 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4251 /* First zero-extend. */
4252 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4253 /* If desired, propagate sign bit. */
4254 if (code == SIGN_EXTRACT
4255 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4256 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4259 /* Clear the bits that don't belong in our mode,
4260 unless they and our sign bit are all one.
4261 So we get either a reasonable negative value or a reasonable
4262 unsigned value for this mode. */
4263 if (width < HOST_BITS_PER_WIDE_INT
4264 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4265 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4266 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4268 return gen_int_mode (val, mode);
/* IF_THEN_ELSE with a constant condition selects one arm outright.  */
4273 if (GET_CODE (op0) == CONST_INT)
4274 return op0 != const0_rtx ? op1 : op2;
4276 /* Convert c ? a : a into "a". */
4277 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4280 /* Convert a != b ? a : b into "a". */
/* Disallowed when NaNs or signed zeros matter: a == b does not imply
   bitwise identity for floating point (e.g. -0.0 == +0.0).  */
4281 if (GET_CODE (op0) == NE
4282 && ! side_effects_p (op0)
4283 && ! HONOR_NANS (mode)
4284 && ! HONOR_SIGNED_ZEROS (mode)
4285 && ((rtx_equal_p (XEXP (op0, 0), op1)
4286 && rtx_equal_p (XEXP (op0, 1), op2))
4287 || (rtx_equal_p (XEXP (op0, 0), op2)
4288 && rtx_equal_p (XEXP (op0, 1), op1))))
4291 /* Convert a == b ? a : b into "b". */
4292 if (GET_CODE (op0) == EQ
4293 && ! side_effects_p (op0)
4294 && ! HONOR_NANS (mode)
4295 && ! HONOR_SIGNED_ZEROS (mode)
4296 && ((rtx_equal_p (XEXP (op0, 0), op1)
4297 && rtx_equal_p (XEXP (op0, 1), op2))
4298 || (rtx_equal_p (XEXP (op0, 0), op2)
4299 && rtx_equal_p (XEXP (op0, 1), op1))))
4302 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4304 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4305 ? GET_MODE (XEXP (op0, 1))
4306 : GET_MODE (XEXP (op0, 0)));
4309 /* Look for happy constants in op1 and op2. */
/* (cond ? STORE_FLAG_VALUE : 0) is just the comparison itself;
   (cond ? 0 : STORE_FLAG_VALUE) is the reversed comparison.  */
4310 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4312 HOST_WIDE_INT t = INTVAL (op1);
4313 HOST_WIDE_INT f = INTVAL (op2);
4315 if (t == STORE_FLAG_VALUE && f == 0)
4316 code = GET_CODE (op0);
4317 else if (t == 0 && f == STORE_FLAG_VALUE)
4320 tmp = reversed_comparison_code (op0, NULL_RTX);
4328 return simplify_gen_relational (code, mode, cmp_mode,
4329 XEXP (op0, 0), XEXP (op0, 1));
/* Otherwise try to fold the condition to a constant and pick an arm.  */
4332 if (cmp_mode == VOIDmode)
4333 cmp_mode = op0_mode;
4334 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4335 cmp_mode, XEXP (op0, 0),
4338 /* See if any simplifications were possible. */
4341 if (GET_CODE (temp) == CONST_INT)
4342 return temp == const0_rtx ? op2 : op1;
4344 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
/* VEC_MERGE handling: OP2 is a bit mask selecting, per element, from
   OP0 (bit set) or OP1 (bit clear).  */
4350 gcc_assert (GET_MODE (op0) == mode);
4351 gcc_assert (GET_MODE (op1) == mode);
4352 gcc_assert (VECTOR_MODE_P (mode));
4353 op2 = avoid_constant_pool_reference (op2);
4354 if (GET_CODE (op2) == CONST_INT)
4356 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4357 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4358 int mask = (1 << n_elts) - 1;
/* All-zero mask selects OP1 entirely; all-ones selects OP0.  */
4360 if (!(INTVAL (op2) & mask))
4362 if ((INTVAL (op2) & mask) == mask)
4365 op0 = avoid_constant_pool_reference (op0);
4366 op1 = avoid_constant_pool_reference (op1);
4367 if (GET_CODE (op0) == CONST_VECTOR
4368 && GET_CODE (op1) == CONST_VECTOR)
4370 rtvec v = rtvec_alloc (n_elts);
4373 for (i = 0; i < n_elts; i++)
4374 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4375 ? CONST_VECTOR_ELT (op0, i)
4376 : CONST_VECTOR_ELT (op1, i));
4377 return gen_rtx_CONST_VECTOR (mode, v);
4389 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4390 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4392 Works by unpacking OP into a collection of 8-bit values
4393 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4394 and then repacking them again for OUTERMODE. */
/* Three phases: (1) unpack OP into VALUE[] in little-endian chunk order,
   (2) locate the starting chunk from BYTE (endianness-corrected),
   (3) repack chunks into OUTERMODE element(s).  NOTE(review): this
   listing elides some lines; code kept byte-identical.  */
4397 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4398 enum machine_mode innermode, unsigned int byte)
4400 /* We support up to 512-bit values (for V8DFmode). */
4404 value_mask = (1 << value_bit) - 1
/* Scratch buffer holding the unpacked constant, one VALUE_BIT-sized
   chunk per entry, least-significant chunk first.  */
4406 unsigned char value[max_bitsize / value_bit];
4415 rtvec result_v = NULL;
4416 enum mode_class outer_class;
4417 enum machine_mode outer_submode;
4419 /* Some ports misuse CCmode. */
4420 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4423 /* We have no way to represent a complex constant at the rtl level. */
4424 if (COMPLEX_MODE_P (outermode))
4427 /* Unpack the value. */
4429 if (GET_CODE (op) == CONST_VECTOR)
4431 num_elem = CONST_VECTOR_NUNITS (op);
4432 elems = &CONST_VECTOR_ELT (op, 0);
4433 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4439 elem_bitsize = max_bitsize;
4441 /* If this asserts, it is too complicated; reducing value_bit may help. */
4442 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4443 /* I don't know how to handle endianness of sub-units. */
4444 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4446 for (elem = 0; elem < num_elem; elem++)
4449 rtx el = elems[elem];
4451 /* Vectors are kept in target memory order. (This is probably
/* Map the element's memory position to its little-endian position in
   VALUE[], correcting separately for word order and byte-in-word order.  */
4454 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4455 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4457 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4458 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4459 unsigned bytele = (subword_byte % UNITS_PER_WORD
4460 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4461 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
/* Unpack this element chunk by chunk according to its rtx code
   (CONST_INT / CONST_DOUBLE integer / CONST_DOUBLE float).  */
4464 switch (GET_CODE (el))
4468 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4470 *vp++ = INTVAL (el) >> i;
4471 /* CONST_INTs are always logically sign-extended. */
4472 for (; i < elem_bitsize; i += value_bit)
4473 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4477 if (GET_MODE (el) == VOIDmode)
4479 /* If this triggers, someone should have generated a
4480 CONST_INT instead. */
4481 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4483 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4484 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4485 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4488 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4491 /* It shouldn't matter what's done here, so fill it with
4493 for (; i < elem_bitsize; i += value_bit)
4498 long tmp[max_bitsize / 32];
4499 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4501 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4502 gcc_assert (bitsize <= elem_bitsize);
4503 gcc_assert (bitsize % value_bit == 0);
4505 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4508 /* real_to_target produces its result in words affected by
4509 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4510 and use WORDS_BIG_ENDIAN instead; see the documentation
4511 of SUBREG in rtl.texi. */
4512 for (i = 0; i < bitsize; i += value_bit)
4515 if (WORDS_BIG_ENDIAN)
4516 ibase = bitsize - 1 - i;
4519 *vp++ = tmp[ibase / 32] >> i % 32;
4522 /* It shouldn't matter what's done here, so fill it with
4524 for (; i < elem_bitsize; i += value_bit)
4534 /* Now, pick the right byte to start with. */
4535 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4536 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4537 will already have offset 0. */
4538 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4540 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4542 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4543 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4544 byte = (subword_byte % UNITS_PER_WORD
4545 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4548 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4549 so if it's become negative it will instead be very large.) */
4550 gcc_assert (byte < GET_MODE_SIZE (innermode));
4552 /* Convert from bytes to chunks of size value_bit. */
4553 value_start = byte * (BITS_PER_UNIT / value_bit);
4555 /* Re-pack the value. */
4557 if (VECTOR_MODE_P (outermode))
4559 num_elem = GET_MODE_NUNITS (outermode);
4560 result_v = rtvec_alloc (num_elem);
4561 elems = &RTVEC_ELT (result_v, 0);
4562 outer_submode = GET_MODE_INNER (outermode);
4568 outer_submode = outermode;
4571 outer_class = GET_MODE_CLASS (outer_submode);
4572 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4574 gcc_assert (elem_bitsize % value_bit == 0);
4575 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4577 for (elem = 0; elem < num_elem; elem++)
4581 /* Vectors are stored in target memory order. (This is probably
/* Same memory-order-to-little-endian mapping as in the unpack loop.  */
4584 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4585 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4587 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4588 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4589 unsigned bytele = (subword_byte % UNITS_PER_WORD
4590 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4591 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4594 switch (outer_class)
4597 case MODE_PARTIAL_INT:
/* Assemble an integer result from the chunks: low host word first,
   then the high word.  */
4599 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4602 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4604 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4605 for (; i < elem_bitsize; i += value_bit)
4606 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4607 << (i - HOST_BITS_PER_WIDE_INT));
4609 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4611 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4612 elems[elem] = gen_int_mode (lo, outer_submode);
4613 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4614 elems[elem] = immed_double_const (lo, hi, outer_submode);
4621 case MODE_DECIMAL_FLOAT:
4624 long tmp[max_bitsize / 32];
4626 /* real_from_target wants its input in words affected by
4627 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4628 and use WORDS_BIG_ENDIAN instead; see the documentation
4629 of SUBREG in rtl.texi. */
4630 for (i = 0; i < max_bitsize / 32; i++)
4632 for (i = 0; i < elem_bitsize; i += value_bit)
4635 if (WORDS_BIG_ENDIAN)
4636 ibase = elem_bitsize - 1 - i;
4639 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4642 real_from_target (&r, tmp, outer_submode);
4643 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
/* Vector results are wrapped in a CONST_VECTOR; scalar results
   (presumably elems[0]) are returned directly — elided here.  */
4651 if (VECTOR_MODE_P (outermode))
4652 return gen_rtx_CONST_VECTOR (outermode, result_v);
4657 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4658 Return 0 if no simplifications are possible. */
/* Handles, in order: identity subregs, constant operands (via
   simplify_immed_subreg), nested SUBREGs, TRUNCATE merging, hard
   registers, MEMs, CONCAT, extension truncations, and shift/extend
   truncation patterns.  NOTE(review): some lines are elided in this
   listing; code kept byte-identical.  */
4660 simplify_subreg (enum machine_mode outermode, rtx op,
4661 enum machine_mode innermode, unsigned int byte)
4663 /* Little bit of sanity checking. */
4664 gcc_assert (innermode != VOIDmode);
4665 gcc_assert (outermode != VOIDmode);
4666 gcc_assert (innermode != BLKmode);
4667 gcc_assert (outermode != BLKmode);
4669 gcc_assert (GET_MODE (op) == innermode
4670 || GET_MODE (op) == VOIDmode);
4672 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4673 gcc_assert (byte < GET_MODE_SIZE (innermode));
4675 if (outermode == innermode && !byte)
/* Constant operands fold to a new constant via byte-level evaluation.  */
4678 if (GET_CODE (op) == CONST_INT
4679 || GET_CODE (op) == CONST_DOUBLE
4680 || GET_CODE (op) == CONST_VECTOR)
4681 return simplify_immed_subreg (outermode, op, innermode, byte);
4683 /* Changing mode twice with SUBREG => just change it once,
4684 or not at all if changing back op starting mode. */
4685 if (GET_CODE (op) == SUBREG)
4687 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4688 int final_offset = byte + SUBREG_BYTE (op);
4691 if (outermode == innermostmode
4692 && byte == 0 && SUBREG_BYTE (op) == 0)
4693 return SUBREG_REG (op);
4695 /* The SUBREG_BYTE represents offset, as if the value were stored
4696 in memory. Irritating exception is paradoxical subreg, where
4697 we define SUBREG_BYTE to be 0. On big endian machines, this
4698 value should be negative. For a moment, undo this exception. */
4699 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4701 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4702 if (WORDS_BIG_ENDIAN)
4703 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4704 if (BYTES_BIG_ENDIAN)
4705 final_offset += difference % UNITS_PER_WORD;
/* Same correction for the inner paradoxical subreg, if any.  */
4707 if (SUBREG_BYTE (op) == 0
4708 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4710 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4711 if (WORDS_BIG_ENDIAN)
4712 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4713 if (BYTES_BIG_ENDIAN)
4714 final_offset += difference % UNITS_PER_WORD;
4717 /* See whether resulting subreg will be paradoxical. */
4718 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4720 /* In nonparadoxical subregs we can't handle negative offsets. */
4721 if (final_offset < 0)
4723 /* Bail out in case resulting subreg would be incorrect. */
4724 if (final_offset % GET_MODE_SIZE (outermode)
4725 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4731 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4733 /* In paradoxical subreg, see if we are still looking on lower part.
4734 If so, our SUBREG_BYTE will be 0. */
4735 if (WORDS_BIG_ENDIAN)
4736 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4737 if (BYTES_BIG_ENDIAN)
4738 offset += difference % UNITS_PER_WORD;
4739 if (offset == final_offset)
4745 /* Recurse for further possible simplifications. */
4746 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
/* If recursion found nothing, build the collapsed SUBREG directly
   when it would be valid.  */
4750 if (validate_subreg (outermode, innermostmode,
4751 SUBREG_REG (op), final_offset))
4752 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4756 /* Merge implicit and explicit truncations. */
4758 if (GET_CODE (op) == TRUNCATE
4759 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4760 && subreg_lowpart_offset (outermode, innermode) == byte)
4761 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4762 GET_MODE (XEXP (op, 0)));
4764 /* SUBREG of a hard register => just change the register number
4765 and/or mode. If the hard register is not valid in that mode,
4766 suppress this simplification. If the hard register is the stack,
4767 frame, or argument pointer, leave this as a SUBREG. */
4770 && REGNO (op) < FIRST_PSEUDO_REGISTER
4771 #ifdef CANNOT_CHANGE_MODE_CLASS
4772 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4773 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4774 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4776 && ((reload_completed && !frame_pointer_needed)
4777 || (REGNO (op) != FRAME_POINTER_REGNUM
4778 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4779 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4782 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4783 && REGNO (op) != ARG_POINTER_REGNUM
4785 && REGNO (op) != STACK_POINTER_REGNUM
4786 && subreg_offset_representable_p (REGNO (op), innermode,
4789 unsigned int regno = REGNO (op);
4790 unsigned int final_regno
4791 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4793 /* ??? We do allow it if the current REG is not valid for
4794 its mode. This is a kludge to work around how float/complex
4795 arguments are passed on 32-bit SPARC and should be fixed. */
4796 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4797 || ! HARD_REGNO_MODE_OK (regno, innermode))
4800 int final_offset = byte;
4802 /* Adjust offset for paradoxical subregs. */
4804 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4806 int difference = (GET_MODE_SIZE (innermode)
4807 - GET_MODE_SIZE (outermode));
4808 if (WORDS_BIG_ENDIAN)
4809 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4810 if (BYTES_BIG_ENDIAN)
4811 final_offset += difference % UNITS_PER_WORD;
4814 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4816 /* Propagate original regno. We don't have any way to specify
4817 the offset inside original regno, so do so only for lowpart.
4818 The information is used only by alias analysis that can not
4819 grog partial register anyway. */
4821 if (subreg_lowpart_offset (outermode, innermode) == byte)
4822 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4827 /* If we have a SUBREG of a register that we are replacing and we are
4828 replacing it with a MEM, make a new MEM and try replacing the
4829 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4830 or if we would be widening it. */
4833 && ! mode_dependent_address_p (XEXP (op, 0))
4834 /* Allow splitting of volatile memory references in case we don't
4835 have instruction to move the whole thing. */
4836 && (! MEM_VOLATILE_P (op)
4837 || ! have_insn_for (SET, innermode))
4838 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4839 return adjust_address_nv (op, outermode, byte);
4841 /* Handle complex values represented as CONCAT
4842 of real and imaginary part. */
4843 if (GET_CODE (op) == CONCAT)
4845 unsigned int part_size, final_offset;
/* Select the CONCAT half the requested bytes fall in, then recurse
   into that half with the offset rebased.  */
4848 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
4849 if (byte < part_size)
4851 part = XEXP (op, 0);
4852 final_offset = byte;
4856 part = XEXP (op, 1);
4857 final_offset = byte - part_size;
/* Give up if the subreg would straddle both halves.  */
4860 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4863 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4866 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4867 return gen_rtx_SUBREG (outermode, part, final_offset);
4871 /* Optimize SUBREG truncations of zero and sign extended values. */
4872 if ((GET_CODE (op) == ZERO_EXTEND
4873 || GET_CODE (op) == SIGN_EXTEND)
4874 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4876 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4878 /* If we're requesting the lowpart of a zero or sign extension,
4879 there are three possibilities. If the outermode is the same
4880 as the origmode, we can omit both the extension and the subreg.
4881 If the outermode is not larger than the origmode, we can apply
4882 the truncation without the extension. Finally, if the outermode
4883 is larger than the origmode, but both are integer modes, we
4884 can just extend to the appropriate mode. */
4887 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4888 if (outermode == origmode)
4889 return XEXP (op, 0);
4890 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4891 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4892 subreg_lowpart_offset (outermode,
4894 if (SCALAR_INT_MODE_P (outermode))
4895 return simplify_gen_unary (GET_CODE (op), outermode,
4896 XEXP (op, 0), origmode);
4899 /* A SUBREG resulting from a zero extension may fold to zero if
4900 it extracts higher bits that the ZERO_EXTEND's source bits. */
4901 if (GET_CODE (op) == ZERO_EXTEND
4902 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4903 return CONST0_RTX (outermode);
4906 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4907 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4908 the outer subreg is effectively a truncation to the original mode. */
4909 if ((GET_CODE (op) == LSHIFTRT
4910 || GET_CODE (op) == ASHIFTRT)
4911 && SCALAR_INT_MODE_P (outermode)
4912 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4913 to avoid the possibility that an outer LSHIFTRT shifts by more
4914 than the sign extension's sign_bit_copies and introduces zeros
4915 into the high bits of the result. */
4916 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4917 && GET_CODE (XEXP (op, 1)) == CONST_INT
4918 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4919 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4920 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4921 && subreg_lsb_1 (outermode, innermode, byte) == 0
4922 return simplify_gen_binary (ASHIFTRT, outermode,
4923 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4925 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4926 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4927 the outer subreg is effectively a truncation to the original mode. */
4928 if ((GET_CODE (op) == LSHIFTRT
4929 || GET_CODE (op) == ASHIFTRT)
4930 && SCALAR_INT_MODE_P (outermode)
4931 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4932 && GET_CODE (XEXP (op, 1)) == CONST_INT
4933 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4934 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4935 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4936 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4937 return simplify_gen_binary (LSHIFTRT, outermode,
4938 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4940 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4941 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4942 the outer subreg is effectively a truncation to the original mode. */
4943 if (GET_CODE (op) == ASHIFT
4944 && SCALAR_INT_MODE_P (outermode)
4945 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4946 && GET_CODE (XEXP (op, 1)) == CONST_INT
4947 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4948 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4949 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4950 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4951 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4952 return simplify_gen_binary (ASHIFT, outermode,
4953 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4958 /* Make a SUBREG operation or equivalent if it folds. */
/* Try simplify_subreg first; on failure (the intervening return is
   elided in this listing), refuse to wrap SUBREG/CONCAT/mode-less
   operands in a fresh SUBREG, and otherwise build the SUBREG rtx
   when validate_subreg accepts the combination.  */
4961 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4962 enum machine_mode innermode, unsigned int byte)
4966 newx = simplify_subreg (outermode, op, innermode, byte);
/* A SUBREG of a SUBREG or CONCAT, or of a VOIDmode rtx, is never a
   valid rtl form — punt rather than generate it.  */
4970 if (GET_CODE (op) == SUBREG
4971 || GET_CODE (op) == CONCAT
4972 || GET_MODE (op) == VOIDmode)
4975 if (validate_subreg (outermode, innermode, op, byte))
4976 return gen_rtx_SUBREG (outermode, op, byte);
4981 /* Simplify X, an rtx expression.
4983 Return the simplified expression or NULL if no simplifications
4986 This is the preferred entry point into the simplification routines;
4987 however, we still allow passes to call the more specific routines.
4989 Right now GCC has three (yes, three) major bodies of RTL simplification
4990 code that need to be unified.
4992 1. fold_rtx in cse.c. This code uses various CSE specific
4993 information to aid in RTL simplification.
4995 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4996 it uses combine specific information to aid in RTL
4999 3. The routines in this file.
5002 Long term we want to only have one body of simplification code; to
5003 get to that state I recommend the following steps:
5005 1. Pour over fold_rtx & simplify_rtx and move any simplifications
5006 which are not pass dependent state into these routines.
5008 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5009 use this routine whenever possible.
5011 3. Allow for pass dependent state to be provided to these
5012 routines and add simplifications based on the pass dependent
5013 state. Remove code from cse.c & combine.c that becomes
5016 It will take time, but ultimately the compiler will be easier to
5017 maintain and improve. It's totally silly that when we add a
5018 simplification that it needs to be added to 4 places (3 for RTL
5019 simplification and 1 for tree simplification. */
5022 simplify_rtx (rtx x)
5024 enum rtx_code code = GET_CODE (x);
5025 enum machine_mode mode = GET_MODE (x);
5027 switch (GET_RTX_CLASS (code))
5030 return simplify_unary_operation (code, mode,
5031 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5032 case RTX_COMM_ARITH:
5033 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5034 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5036 /* Fall through.... */
5039 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5042 case RTX_BITFIELD_OPS:
5043 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5044 XEXP (x, 0), XEXP (x, 1),
5048 case RTX_COMM_COMPARE:
5049 return simplify_relational_operation (code, mode,
5050 ((GET_MODE (XEXP (x, 0))
5052 ? GET_MODE (XEXP (x, 0))
5053 : GET_MODE (XEXP (x, 1))),
5059 return simplify_subreg (mode, SUBREG_REG (x),
5060 GET_MODE (SUBREG_REG (x)),
5067 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5068 if (GET_CODE (XEXP (x, 0)) == HIGH
5069 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))