/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
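/* For example, HWI_SIGN_EXTEND applied to a LOW value whose sign bit is
   set (say -5) yields (HOST_WIDE_INT) -1, while a nonnegative LOW (say 5)
   yields 0 -- exactly the high half that a genuine sign extension of LOW
   would produce.  */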
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int, unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
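/* For instance, mode_signbit_p (SImode, x) accepts only the CONST_INT
   whose SImode-masked value is 0x80000000 (canonically written as
   (const_int -2147483648)); every other constant fails the final
   equality test.  */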
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
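/* As a usage sketch: simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   folds to X itself, while simplify_gen_binary (PLUS, SImode, const1_rtx, x)
   is canonicalized to (plus:SI x (const_int 1)) because commutative
   operations put the constant second.  */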
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !host_integerp (toffset, 0)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
static rtx
simplify_truncation (enum machine_mode mode, rtx op,
		     enum machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      enum machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
     to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))).  */
  if (GET_CODE (op) == PLUS
      || GET_CODE (op) == MINUS
      || GET_CODE (op) == MULT)
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }
  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer truncation is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
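  /* Concretely: (truncate:QI (lshiftrt:SI (sign_extend:SI x:QI)
     (const_int 4))) becomes (ashiftrt:QI x (const_int 4)); the
     sign-extended high bits that the SImode shift pulls down are
     exactly the bits an arithmetic QImode shift reproduces.  */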
  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer truncation is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer truncation is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }
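  /* E.g. on a 32-bit little-endian target, (truncate:SI (lshiftrt:DI x
     (const_int 32))) becomes (subreg:SI x 4), a direct reference to the
     high word of X.  */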
  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }
  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}
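      /* E.g. in QImode with X == 2: ~(1 << 2) is 0xfb, and rotating
	 ~1 == 0xfe left by 2 gives 0xfb as well.  */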
      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (mode, XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}
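      /* Thus (neg (plus x (const_int 5))) becomes (minus (const_int -5) x)
	 via the constant branch above, and (neg (plus x y)) becomes
	 (minus (neg x) y).  */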
      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));
      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);
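      /* Check both values: A == 0 gives -(0 ^ 1) == -1 == 0 - 1, and
	 A == 1 gives -(1 ^ 1) == 0 == 1 - 1.  */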
      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
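      /* Both branches above smear the sign bit of X across the word:
	 when STORE_FLAG_VALUE is 1, (neg (lt X 0)) is -1 or 0, exactly
	 what the arithmetic right shift by ISIZE - 1 produces; when it
	 is -1, the negation is 1 or 0, which the logical right shift
	 produces.  */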
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.
      */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}
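      /* For instance, BITS is 16 + 16 for (mult:SI (sign_extend:SI a:HI)
	 (sign_extend:SI b:HI)), so a surrounding (sign_extend:DI ...)
	 rewrites it as (mult:DI (sign_extend:DI a:HI)
	 (sign_extend:DI b:HI)) -- the SImode product cannot overflow.  */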
      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}
      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode
	  || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
	/* We should never get a negative number.  */
	gcc_assert (hv >= 0);
      else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  arg0 &= GET_MODE_MASK (mode);
	  val = ffs_hwi (arg0);
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
	  break;

	case CLRSB:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    val = GET_MODE_PRECISION (mode) - 1;
	  else if (arg0 >= 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
	  else if (arg0 < 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_PRECISION (mode);
	    }
	  else
	    val = ctz_hwi (arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;
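	  /* Concretely, a SImode BSWAP of 0x11223344 reassembles the
	     bytes in reverse order and yields 0x44332211.  */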
	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & GET_MODE_MASK (op_mode);
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  op_width = GET_MODE_PRECISION (op_mode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (op_width < HOST_BITS_PER_WIDE_INT)
	    {
	      val = arg0 & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, val))
		val |= ~GET_MODE_MASK (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	case US_NEG:
	case SS_ABS:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      double_int first, value;

      if (CONST_DOUBLE_AS_INT_P (op))
	first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
				       CONST_DOUBLE_LOW (op));
      else
	first = double_int::from_shwi (INTVAL (op));

      switch (code)
	{
	case NOT:
	  value = ~first;
	  break;

	case NEG:
	  value = -first;
	  break;

	case ABS:
	  if (first.is_negative ())
	    value = -first;
	  else
	    value = first;
	  break;

	case FFS:
	  value.high = 0;
	  if (first.low != 0)
	    value.low = ffs_hwi (first.low);
	  else if (first.high != 0)
	    value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
	  else
	    value.low = 0;
	  break;

	case CLZ:
	  value.high = 0;
	  if (first.high != 0)
	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (first.low != 0)
	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
	    value.low = GET_MODE_PRECISION (mode);
	  break;

	case CTZ:
	  value.high = 0;
	  if (first.low != 0)
	    value.low = ctz_hwi (first.low);
	  else if (first.high != 0)
	    value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
	    value.low = GET_MODE_PRECISION (mode);
	  break;

	case POPCOUNT:
	  value = double_int_zero;
	  while (first.low)
	    {
	      value.low++;
	      first.low &= first.low - 1;
	    }
	  while (first.high)
	    {
	      value.low++;
	      first.high &= first.high - 1;
	    }
	  break;

	case PARITY:
	  value = double_int_zero;
	  while (first.low)
	    {
	      value.low++;
	      first.low &= first.low - 1;
	    }
	  while (first.high)
	    {
	      value.low++;
	      first.high &= first.high - 1;
	    }
	  value.low &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    value = double_int_zero;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (first.low >> s) & 0xff;
		else
		  byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  value.low |= byte << d;
		else
		  value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  value = first;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      value.low = first.low & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, value.low))
		value.low |= ~GET_MODE_MASK (op_mode);

	      value.high = HWI_SIGN_EXTEND (value.low);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_int_const (value, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (unsigned HOST_WIDE_INT) (-1)
		   << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == HOST_BITS_PER_DOUBLE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
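/* For example, given (plus (plus x (const_int 1)) (const_int 2)), the
   "(a op b) op c" -> "a op (b op c)" attempt above folds the two
   constants and returns (plus x (const_int 3)).  */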
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
2143 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2144 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2145 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2146 actual constants. */
2149 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2150 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2152 rtx tem, reversed, opleft, opright;
2154 unsigned int width = GET_MODE_PRECISION (mode);
2156 /* Even if we can't compute a constant result,
2157 there are some cases worth simplifying. */
2162 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2163 when x is NaN, infinite, or finite and nonzero. They aren't
2164 when x is -0 and the rounding mode is not towards -infinity,
2165 since (-0) + 0 is then 0. */
2166 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2169 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2170 transformations are safe even for IEEE. */
2171 if (GET_CODE (op0) == NEG)
2172 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2173 else if (GET_CODE (op1) == NEG)
2174 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2176 /* (~a) + 1 -> -a */
2177 if (INTEGRAL_MODE_P (mode)
2178 && GET_CODE (op0) == NOT
2179 && trueop1 == const1_rtx)
2180 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
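/* Editorial illustration, not part of the original source: this is the
   two's-complement identity -a == ~a + 1.  In QImode with a == 5
   (00000101), ~a == 11111010 == 250 and 250 + 1 == 251 == -5 (mod 256),
   so (plus (not (reg a)) (const_int 1)) becomes (neg (reg a)).  */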
2182 /* Handle both-operands-constant cases. We can only add
2183 CONST_INTs to constants since the sum of relocatable symbols
2184 can't be handled by most assemblers. Don't add CONST_INT
2185 to CONST_INT since overflow won't be computed properly if wider
2186 than HOST_BITS_PER_WIDE_INT. */
2188 if ((GET_CODE (op0) == CONST
2189 || GET_CODE (op0) == SYMBOL_REF
2190 || GET_CODE (op0) == LABEL_REF)
2191 && CONST_INT_P (op1))
2192 return plus_constant (mode, op0, INTVAL (op1));
2193 else if ((GET_CODE (op1) == CONST
2194 || GET_CODE (op1) == SYMBOL_REF
2195 || GET_CODE (op1) == LABEL_REF)
2196 && CONST_INT_P (op0))
2197 return plus_constant (mode, op1, INTVAL (op0));
2199 /* See if this is something like X * C - X or vice versa or
2200 if the multiplication is written as a shift. If so, we can
2201 distribute and make a new multiply, shift, or maybe just
2202 have X (if C is 2 in the example above). But don't make
2203 something more expensive than we had before. */
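/* Editorial worked example, not part of the original source: with C == 4,
   (plus (mult (reg X) (const_int 4)) (reg X)) is recognized below with
   coeff0 == 4 and coeff1 == 1 and rebuilt as (mult (reg X) (const_int 5));
   (plus (ashift (reg X) (const_int 2)) (reg X)) is handled the same way,
   since X << 2 is X * 4.  The set_src_cost comparison at the end keeps
   the original form when the combined multiply would cost more.  */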
2205 if (SCALAR_INT_MODE_P (mode))
2207 double_int coeff0, coeff1;
2208 rtx lhs = op0, rhs = op1;
2210 coeff0 = double_int_one;
2211 coeff1 = double_int_one;
2213 if (GET_CODE (lhs) == NEG)
2215 coeff0 = double_int_minus_one;
2216 lhs = XEXP (lhs, 0);
2218 else if (GET_CODE (lhs) == MULT
2219 && CONST_INT_P (XEXP (lhs, 1)))
2221 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2222 lhs = XEXP (lhs, 0);
2224 else if (GET_CODE (lhs) == ASHIFT
2225 && CONST_INT_P (XEXP (lhs, 1))
2226 && INTVAL (XEXP (lhs, 1)) >= 0
2227 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2229 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2230 lhs = XEXP (lhs, 0);
2233 if (GET_CODE (rhs) == NEG)
2235 coeff1 = double_int_minus_one;
2236 rhs = XEXP (rhs, 0);
2238 else if (GET_CODE (rhs) == MULT
2239 && CONST_INT_P (XEXP (rhs, 1)))
2241 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2242 rhs = XEXP (rhs, 0);
2244 else if (GET_CODE (rhs) == ASHIFT
2245 && CONST_INT_P (XEXP (rhs, 1))
2246 && INTVAL (XEXP (rhs, 1)) >= 0
2247 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2249 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2250 rhs = XEXP (rhs, 0);
2253 if (rtx_equal_p (lhs, rhs))
2255 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2256 rtx coeff;
2257 double_int val;
2258 bool speed = optimize_function_for_speed_p (cfun);
2260 val = coeff0 + coeff1;
2261 coeff = immed_double_int_const (val, mode);
2263 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2264 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2265 ? tem : 0;
2269 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2270 if (CONST_SCALAR_INT_P (op1)
2271 && GET_CODE (op0) == XOR
2272 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2273 && mode_signbit_p (mode, op1))
2274 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2275 simplify_gen_binary (XOR, mode, op1,
2276 XEXP (op0, 1)));
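/* Editorial note, not part of the original source: the identity holds
   because adding the sign bit cannot carry into lower bits and the carry
   out of the top bit is discarded, so PLUS and XOR agree on that bit.
   E.g. in QImode with C2 == 0x80: (x ^ C1) + 0x80 == x ^ (C1 ^ 0x80)
   (mod 256).  */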
2278 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2279 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2280 && GET_CODE (op0) == MULT
2281 && GET_CODE (XEXP (op0, 0)) == NEG)
2283 rtx in1, in2;
2285 in1 = XEXP (XEXP (op0, 0), 0);
2286 in2 = XEXP (op0, 1);
2287 return simplify_gen_binary (MINUS, mode, op1,
2288 simplify_gen_binary (MULT, mode,
2289 in1, in2));
2292 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2293 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2294 is 1.  */
2295 if (COMPARISON_P (op0)
2296 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2297 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2298 && (reversed = reversed_comparison (op0, mode)))
2299 return
2300 simplify_gen_unary (NEG, mode, reversed, mode);
2302 /* If one of the operands is a PLUS or a MINUS, see if we can
2303 simplify this by the associative law.
2304 Don't use the associative law for floating point.
2305 The inaccuracy makes it nonassociative,
2306 and subtle programs can break if operations are associated. */
2308 if (INTEGRAL_MODE_P (mode)
2309 && (plus_minus_operand_p (op0)
2310 || plus_minus_operand_p (op1))
2311 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2312 return tem;
2314 /* Reassociate floating point addition only when the user
2315 specifies associative math operations. */
2316 if (FLOAT_MODE_P (mode)
2317 && flag_associative_math)
2319 tem = simplify_associative_operation (code, mode, op0, op1);
2320 if (tem)
2321 return tem;
2323 break;
2325 case COMPARE:
2326 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2327 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2328 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2329 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2331 rtx xop00 = XEXP (op0, 0);
2332 rtx xop10 = XEXP (op1, 0);
2334 #ifdef HAVE_cc0
2335 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2336 #else
2337 if (REG_P (xop00) && REG_P (xop10)
2338 && GET_MODE (xop00) == GET_MODE (xop10)
2339 && REGNO (xop00) == REGNO (xop10)
2340 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2341 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2342 #endif
2343 return xop00;
2346 break;
2347 case MINUS:
2348 /* We can't assume x-x is 0 even with non-IEEE floating point,
2349 but since it is zero except in very strange circumstances, we
2350 will treat it as zero with -ffinite-math-only. */
2351 if (rtx_equal_p (trueop0, trueop1)
2352 && ! side_effects_p (op0)
2353 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2354 return CONST0_RTX (mode);
2356 /* Change subtraction from zero into negation. (0 - x) is the
2357 same as -x when x is NaN, infinite, or finite and nonzero.
2358 But if the mode has signed zeros, and does not round towards
2359 -infinity, then 0 - 0 is 0, not -0. */
2360 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2361 return simplify_gen_unary (NEG, mode, op1, mode);
2363 /* (-1 - a) is ~a. */
2364 if (trueop0 == constm1_rtx)
2365 return simplify_gen_unary (NOT, mode, op1, mode);
2367 /* Subtracting 0 has no effect unless the mode has signed zeros
2368 and supports rounding towards -infinity.  In such a case,
2369 0 - 0 is -0.  */
2370 if (!(HONOR_SIGNED_ZEROS (mode)
2371 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2372 && trueop1 == CONST0_RTX (mode))
2373 return op0;
2375 /* See if this is something like X * C - X or vice versa or
2376 if the multiplication is written as a shift. If so, we can
2377 distribute and make a new multiply, shift, or maybe just
2378 have X (if C is 2 in the example above). But don't make
2379 something more expensive than we had before. */
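/* Editorial worked example, not part of the original source: with C == 4,
   (minus (mult (reg X) (const_int 4)) (reg X)) gives coeff0 == 4 and
   negcoeff1 == -1, so it is rebuilt as (mult (reg X) (const_int 3)).  */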
2381 if (SCALAR_INT_MODE_P (mode))
2383 double_int coeff0, negcoeff1;
2384 rtx lhs = op0, rhs = op1;
2386 coeff0 = double_int_one;
2387 negcoeff1 = double_int_minus_one;
2389 if (GET_CODE (lhs) == NEG)
2391 coeff0 = double_int_minus_one;
2392 lhs = XEXP (lhs, 0);
2394 else if (GET_CODE (lhs) == MULT
2395 && CONST_INT_P (XEXP (lhs, 1)))
2397 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2398 lhs = XEXP (lhs, 0);
2400 else if (GET_CODE (lhs) == ASHIFT
2401 && CONST_INT_P (XEXP (lhs, 1))
2402 && INTVAL (XEXP (lhs, 1)) >= 0
2403 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2405 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2406 lhs = XEXP (lhs, 0);
2409 if (GET_CODE (rhs) == NEG)
2411 negcoeff1 = double_int_one;
2412 rhs = XEXP (rhs, 0);
2414 else if (GET_CODE (rhs) == MULT
2415 && CONST_INT_P (XEXP (rhs, 1)))
2417 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2418 rhs = XEXP (rhs, 0);
2420 else if (GET_CODE (rhs) == ASHIFT
2421 && CONST_INT_P (XEXP (rhs, 1))
2422 && INTVAL (XEXP (rhs, 1)) >= 0
2423 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2425 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2426 negcoeff1 = -negcoeff1;
2427 rhs = XEXP (rhs, 0);
2430 if (rtx_equal_p (lhs, rhs))
2432 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2433 rtx coeff;
2434 double_int val;
2435 bool speed = optimize_function_for_speed_p (cfun);
2437 val = coeff0 + negcoeff1;
2438 coeff = immed_double_int_const (val, mode);
2440 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2441 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2442 ? tem : 0;
2446 /* (a - (-b)) -> (a + b). True even for IEEE. */
2447 if (GET_CODE (op1) == NEG)
2448 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2450 /* (-x - c) may be simplified as (-c - x). */
2451 if (GET_CODE (op0) == NEG
2452 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2454 tem = simplify_unary_operation (NEG, mode, op1, mode);
2455 if (tem)
2456 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2459 /* Don't let a relocatable value get a negative coeff. */
2460 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2461 return simplify_gen_binary (PLUS, mode,
2462 op0,
2463 neg_const_int (mode, op1));
2465 /* (x - (x & y)) -> (x & ~y) */
2466 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2468 if (rtx_equal_p (op0, XEXP (op1, 0)))
2470 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2471 GET_MODE (XEXP (op1, 1)));
2472 return simplify_gen_binary (AND, mode, op0, tem);
2474 if (rtx_equal_p (op0, XEXP (op1, 1)))
2476 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2477 GET_MODE (XEXP (op1, 0)));
2478 return simplify_gen_binary (AND, mode, op0, tem);
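/* Editorial note, not part of the original source: the bits of (x & y)
   are a subset of the bits of x, so the subtraction never borrows and
   simply clears the y-bits of x.  E.g. x == 0b1110, y == 0b0110:
   x - (x & y) == 14 - 6 == 8 == x & ~y.  */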
2482 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2483 by reversing the comparison code if valid. */
2484 if (STORE_FLAG_VALUE == 1
2485 && trueop0 == const1_rtx
2486 && COMPARISON_P (op1)
2487 && (reversed = reversed_comparison (op1, mode)))
2488 return reversed;
2490 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2491 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2492 && GET_CODE (op1) == MULT
2493 && GET_CODE (XEXP (op1, 0)) == NEG)
2495 rtx in1, in2;
2497 in1 = XEXP (XEXP (op1, 0), 0);
2498 in2 = XEXP (op1, 1);
2499 return simplify_gen_binary (PLUS, mode,
2500 simplify_gen_binary (MULT, mode,
2501 in1, in2),
2502 op0);
2505 /* Canonicalize (minus (neg A) (mult B C)) to
2506 (minus (mult (neg B) C) A). */
2507 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2508 && GET_CODE (op1) == MULT
2509 && GET_CODE (op0) == NEG)
2511 rtx in1, in2;
2513 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2514 in2 = XEXP (op1, 1);
2515 return simplify_gen_binary (MINUS, mode,
2516 simplify_gen_binary (MULT, mode,
2517 in1, in2),
2518 XEXP (op0, 0));
2521 /* If one of the operands is a PLUS or a MINUS, see if we can
2522 simplify this by the associative law. This will, for example,
2523 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2524 Don't use the associative law for floating point.
2525 The inaccuracy makes it nonassociative,
2526 and subtle programs can break if operations are associated. */
2528 if (INTEGRAL_MODE_P (mode)
2529 && (plus_minus_operand_p (op0)
2530 || plus_minus_operand_p (op1))
2531 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2532 return tem;
2533 break;
2535 case MULT:
2536 if (trueop1 == constm1_rtx)
2537 return simplify_gen_unary (NEG, mode, op0, mode);
2539 if (GET_CODE (op0) == NEG)
2541 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2542 /* If op1 is a MULT as well and simplify_unary_operation
2543 just moved the NEG to the second operand, simplify_gen_binary
2544 below could, through simplify_associative_operation, move
2545 the NEG around again and recurse endlessly.  */
2546 if (temp
2547 && GET_CODE (op1) == MULT
2548 && GET_CODE (temp) == MULT
2549 && XEXP (op1, 0) == XEXP (temp, 0)
2550 && GET_CODE (XEXP (temp, 1)) == NEG
2551 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2552 temp = NULL_RTX;
2553 if (temp)
2554 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2556 if (GET_CODE (op1) == NEG)
2558 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2559 /* If op0 is a MULT as well and simplify_unary_operation
2560 just moved the NEG to the second operand, simplify_gen_binary
2561 below could, through simplify_associative_operation, move
2562 the NEG around again and recurse endlessly.  */
2563 if (temp
2564 && GET_CODE (op0) == MULT
2565 && GET_CODE (temp) == MULT
2566 && XEXP (op0, 0) == XEXP (temp, 0)
2567 && GET_CODE (XEXP (temp, 1)) == NEG
2568 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2569 temp = NULL_RTX;
2570 if (temp)
2571 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2574 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2575 x is NaN, since x * 0 is then also NaN. Nor is it valid
2576 when the mode has signed zeros, since multiplying a negative
2577 number by 0 will give -0, not 0. */
2578 if (!HONOR_NANS (mode)
2579 && !HONOR_SIGNED_ZEROS (mode)
2580 && trueop1 == CONST0_RTX (mode)
2581 && ! side_effects_p (op0))
2582 return op1;
2584 /* In IEEE floating point, x*1 is not equivalent to x for
2585 signalling NaNs.  */
2586 if (!HONOR_SNANS (mode)
2587 && trueop1 == CONST1_RTX (mode))
2588 return op0;
2590 /* Convert multiply by constant power of two into shift unless
2591 we are still generating RTL. This test is a kludge. */
2592 if (CONST_INT_P (trueop1)
2593 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2594 /* If the mode is larger than the host word size, and the
2595 uppermost bit is set, then this isn't a power of two due
2596 to implicit sign extension. */
2597 && (width <= HOST_BITS_PER_WIDE_INT
2598 || val != HOST_BITS_PER_WIDE_INT - 1))
2599 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
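/* Editorial worked example, not part of the original source: x * 8
   becomes (ashift x 3), since exact_log2 (8) == 3.  The width check
   above rejects a constant whose only set bit is the implicit sign bit
   of a HOST_WIDE_INT, which is not a power of two once sign-extended.  */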
2601 /* Likewise for multipliers wider than a word. */
2602 if (CONST_DOUBLE_AS_INT_P (trueop1)
2603 && GET_MODE (op0) == mode
2604 && CONST_DOUBLE_LOW (trueop1) == 0
2605 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2606 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2607 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2608 return simplify_gen_binary (ASHIFT, mode, op0,
2609 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2611 /* x*2 is x+x and x*(-1) is -x */
2612 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2613 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2614 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2615 && GET_MODE (op0) == mode)
2617 REAL_VALUE_TYPE d;
2618 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2620 if (REAL_VALUES_EQUAL (d, dconst2))
2621 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2623 if (!HONOR_SNANS (mode)
2624 && REAL_VALUES_EQUAL (d, dconstm1))
2625 return simplify_gen_unary (NEG, mode, op0, mode);
2628 /* Optimize -x * -x as x * x. */
2629 if (FLOAT_MODE_P (mode)
2630 && GET_CODE (op0) == NEG
2631 && GET_CODE (op1) == NEG
2632 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2633 && !side_effects_p (XEXP (op0, 0)))
2634 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2636 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2637 if (SCALAR_FLOAT_MODE_P (mode)
2638 && GET_CODE (op0) == ABS
2639 && GET_CODE (op1) == ABS
2640 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2641 && !side_effects_p (XEXP (op0, 0)))
2642 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2644 /* Reassociate multiplication, but for floating point MULTs
2645 only when the user specifies unsafe math optimizations. */
2646 if (! FLOAT_MODE_P (mode)
2647 || flag_unsafe_math_optimizations)
2649 tem = simplify_associative_operation (code, mode, op0, op1);
2650 if (tem)
2651 return tem;
2653 break;
2655 case IOR:
2656 if (trueop1 == CONST0_RTX (mode))
2657 return op0;
2658 if (INTEGRAL_MODE_P (mode)
2659 && trueop1 == CONSTM1_RTX (mode)
2660 && !side_effects_p (op0))
2661 return op1;
2662 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2663 return op0;
2664 /* A | (~A) -> -1 */
2665 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2666 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2667 && ! side_effects_p (op0)
2668 && SCALAR_INT_MODE_P (mode))
2669 return constm1_rtx;
2671 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2672 if (CONST_INT_P (op1)
2673 && HWI_COMPUTABLE_MODE_P (mode)
2674 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2675 && !side_effects_p (op0))
2676 return op1;
2678 /* Canonicalize (X & C1) | C2. */
2679 if (GET_CODE (op0) == AND
2680 && CONST_INT_P (trueop1)
2681 && CONST_INT_P (XEXP (op0, 1)))
2683 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2684 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2685 HOST_WIDE_INT c2 = INTVAL (trueop1);
2687 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
2688 if ((c1 & c2) == c1
2689 && !side_effects_p (XEXP (op0, 0)))
2690 return trueop1;
2692 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2693 if (((c1|c2) & mask) == mask)
2694 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2696 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2697 if (((c1 & ~c2) & mask) != (c1 & mask))
2699 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2700 gen_int_mode (c1 & ~c2, mode));
2701 return simplify_gen_binary (IOR, mode, tem, op1);
2705 /* Convert (A & B) | A to A. */
2706 if (GET_CODE (op0) == AND
2707 && (rtx_equal_p (XEXP (op0, 0), op1)
2708 || rtx_equal_p (XEXP (op0, 1), op1))
2709 && ! side_effects_p (XEXP (op0, 0))
2710 && ! side_effects_p (XEXP (op0, 1)))
2711 return op1;
2713 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2714 mode size to (rotate A CX). */
2716 if (GET_CODE (op1) == ASHIFT
2717 || GET_CODE (op1) == SUBREG)
2719 opleft = op1;
2720 opright = op0;
2722 else
2724 opright = op1;
2725 opleft = op0;
2728 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2729 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2730 && CONST_INT_P (XEXP (opleft, 1))
2731 && CONST_INT_P (XEXP (opright, 1))
2732 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2733 == GET_MODE_PRECISION (mode)))
2734 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
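/* Editorial worked example, not part of the original source: in SImode
   (precision 32), (ior (ashift (reg X) (const_int 3))
   (lshiftrt (reg X) (const_int 29))) satisfies 3 + 29 == 32 and is
   rewritten as (rotate (reg X) (const_int 3)).  */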
2736 /* Same, but for ashift that has been "simplified" to a wider mode
2737 by simplify_shift_const. */
2739 if (GET_CODE (opleft) == SUBREG
2740 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2741 && GET_CODE (opright) == LSHIFTRT
2742 && GET_CODE (XEXP (opright, 0)) == SUBREG
2743 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2744 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2745 && (GET_MODE_SIZE (GET_MODE (opleft))
2746 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2747 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2748 SUBREG_REG (XEXP (opright, 0)))
2749 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2750 && CONST_INT_P (XEXP (opright, 1))
2751 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2752 == GET_MODE_PRECISION (mode)))
2753 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2754 XEXP (SUBREG_REG (opleft), 1));
2756 /* If we have (ior (and X C1) C2), simplify this by making
2757 C1 as small as possible if C1 actually changes. */
2758 if (CONST_INT_P (op1)
2759 && (HWI_COMPUTABLE_MODE_P (mode)
2760 || INTVAL (op1) > 0)
2761 && GET_CODE (op0) == AND
2762 && CONST_INT_P (XEXP (op0, 1))
2763 && CONST_INT_P (op1)
2764 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2765 return simplify_gen_binary (IOR, mode,
2766 simplify_gen_binary
2767 (AND, mode, XEXP (op0, 0),
2768 GEN_INT (UINTVAL (XEXP (op0, 1))
2769 & ~UINTVAL (op1))),
2770 op1);
2772 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2773 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2774 the PLUS does not affect any of the bits in OP1: then we can do
2775 the IOR as a PLUS and we can associate. This is valid if OP1
2776 can be safely shifted left C bits. */
2777 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2778 && GET_CODE (XEXP (op0, 0)) == PLUS
2779 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2780 && CONST_INT_P (XEXP (op0, 1))
2781 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2783 int count = INTVAL (XEXP (op0, 1));
2784 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2786 if (mask >> count == INTVAL (trueop1)
2787 && trunc_int_for_mode (mask, mode) == mask
2788 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2789 return simplify_gen_binary (ASHIFTRT, mode,
2790 plus_constant (mode, XEXP (op0, 0),
2791 mask),
2792 XEXP (op0, 1));
2795 tem = simplify_associative_operation (code, mode, op0, op1);
2796 if (tem)
2797 return tem;
2798 break;
2800 case XOR:
2801 if (trueop1 == CONST0_RTX (mode))
2802 return op0;
2803 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2804 return simplify_gen_unary (NOT, mode, op0, mode);
2805 if (rtx_equal_p (trueop0, trueop1)
2806 && ! side_effects_p (op0)
2807 && GET_MODE_CLASS (mode) != MODE_CC)
2808 return CONST0_RTX (mode);
2810 /* Canonicalize XOR of the most significant bit to PLUS. */
2811 if (CONST_SCALAR_INT_P (op1)
2812 && mode_signbit_p (mode, op1))
2813 return simplify_gen_binary (PLUS, mode, op0, op1);
2814 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2815 if (CONST_SCALAR_INT_P (op1)
2816 && GET_CODE (op0) == PLUS
2817 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2818 && mode_signbit_p (mode, XEXP (op0, 1)))
2819 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2820 simplify_gen_binary (XOR, mode, op1,
2821 XEXP (op0, 1)));
2823 /* If we are XORing two things that have no bits in common,
2824 convert them into an IOR. This helps to detect rotation encoded
2825 using those methods and possibly other simplifications. */
2827 if (HWI_COMPUTABLE_MODE_P (mode)
2828 && (nonzero_bits (op0, mode)
2829 & nonzero_bits (op1, mode)) == 0)
2830 return (simplify_gen_binary (IOR, mode, op0, op1));
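/* Editorial note, not part of the original source: when no bit position
   can be set in both operands, XOR, IOR and PLUS all compute the same
   result.  E.g. if nonzero_bits allows only 0x00ff in op0 and only
   0xff00 in op1, then (xor op0 op1) == (ior op0 op1).  */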
2832 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2833 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2834 (NOT y).  */
2836 int num_negated = 0;
2838 if (GET_CODE (op0) == NOT)
2839 num_negated++, op0 = XEXP (op0, 0);
2840 if (GET_CODE (op1) == NOT)
2841 num_negated++, op1 = XEXP (op1, 0);
2843 if (num_negated == 2)
2844 return simplify_gen_binary (XOR, mode, op0, op1);
2845 else if (num_negated == 1)
2846 return simplify_gen_unary (NOT, mode,
2847 simplify_gen_binary (XOR, mode, op0, op1),
2848 mode);
2851 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2852 correspond to a machine insn or result in further simplifications
2853 if B is a constant. */
2855 if (GET_CODE (op0) == AND
2856 && rtx_equal_p (XEXP (op0, 1), op1)
2857 && ! side_effects_p (op1))
2858 return simplify_gen_binary (AND, mode,
2859 simplify_gen_unary (NOT, mode,
2860 XEXP (op0, 0), mode),
2861 op1);
2863 else if (GET_CODE (op0) == AND
2864 && rtx_equal_p (XEXP (op0, 0), op1)
2865 && ! side_effects_p (op1))
2866 return simplify_gen_binary (AND, mode,
2867 simplify_gen_unary (NOT, mode,
2868 XEXP (op0, 1), mode),
2869 op1);
2871 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2872 we can transform like this:
2873 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2874 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2875 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2876 Attempt a few simplifications when B and C are both constants. */
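/* Editorial numeric check of the derivation above, not part of the
   original source: in QImode with A == 0x55, B == 0x0f, C == 0x33,
   (A&B)^C == 0x05 ^ 0x33 == 0x36, and
   ~A&C | ~B&C | A&(~C&B) == 0x22 | 0x30 | 0x04 == 0x36.  */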
2877 if (GET_CODE (op0) == AND
2878 && CONST_INT_P (op1)
2879 && CONST_INT_P (XEXP (op0, 1)))
2881 rtx a = XEXP (op0, 0);
2882 rtx b = XEXP (op0, 1);
2883 rtx c = op1;
2884 HOST_WIDE_INT bval = INTVAL (b);
2885 HOST_WIDE_INT cval = INTVAL (c);
2887 rtx na_c
2888 = simplify_binary_operation (AND, mode,
2889 simplify_gen_unary (NOT, mode, a, mode),
2890 c);
2891 if ((~cval & bval) == 0)
2893 /* Try to simplify ~A&C | ~B&C. */
2894 if (na_c != NULL_RTX)
2895 return simplify_gen_binary (IOR, mode, na_c,
2896 GEN_INT (~bval & cval));
2898 else
2900 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
2901 if (na_c == const0_rtx)
2903 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2904 GEN_INT (~cval & bval));
2905 return simplify_gen_binary (IOR, mode, a_nc_b,
2906 GEN_INT (~bval & cval));
2911 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2912 comparison if STORE_FLAG_VALUE is 1. */
2913 if (STORE_FLAG_VALUE == 1
2914 && trueop1 == const1_rtx
2915 && COMPARISON_P (op0)
2916 && (reversed = reversed_comparison (op0, mode)))
2917 return reversed;
2919 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2920 is (lt foo (const_int 0)), so we can perform the above
2921 simplification if STORE_FLAG_VALUE is 1. */
2923 if (STORE_FLAG_VALUE == 1
2924 && trueop1 == const1_rtx
2925 && GET_CODE (op0) == LSHIFTRT
2926 && CONST_INT_P (XEXP (op0, 1))
2927 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2928 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
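/* Editorial note, not part of the original source: in SImode,
   (lshiftrt x 31) is 1 exactly when x < 0, i.e. it is (lt x 0) when
   STORE_FLAG_VALUE is 1; XORing that flag with 1 inverts it, which is
   why the reversed comparison (ge x 0) is built above.  */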
2930 /* (xor (comparison foo bar) (const_int sign-bit))
2931 when STORE_FLAG_VALUE is the sign bit. */
2932 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2933 && trueop1 == const_true_rtx
2934 && COMPARISON_P (op0)
2935 && (reversed = reversed_comparison (op0, mode)))
2936 return reversed;
2938 tem = simplify_associative_operation (code, mode, op0, op1);
2939 if (tem)
2940 return tem;
2941 break;
2943 case AND:
2944 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2945 return trueop1;
2946 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2947 return op0;
2948 if (HWI_COMPUTABLE_MODE_P (mode))
2950 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2951 HOST_WIDE_INT nzop1;
2952 if (CONST_INT_P (trueop1))
2954 HOST_WIDE_INT val1 = INTVAL (trueop1);
2955 /* If we are turning off bits already known off in OP0, we need
2956 not do an AND.  */
2957 if ((nzop0 & ~val1) == 0)
2958 return op0;
2960 nzop1 = nonzero_bits (trueop1, mode);
2961 /* If we are clearing all the nonzero bits, the result is zero. */
2962 if ((nzop1 & nzop0) == 0
2963 && !side_effects_p (op0) && !side_effects_p (op1))
2964 return CONST0_RTX (mode);
2966 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2967 && GET_MODE_CLASS (mode) != MODE_CC)
2968 return op0;
2970 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2971 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2972 && ! side_effects_p (op0)
2973 && GET_MODE_CLASS (mode) != MODE_CC)
2974 return CONST0_RTX (mode);
2976 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2977 there are no nonzero bits of C outside of X's mode. */
2978 if ((GET_CODE (op0) == SIGN_EXTEND
2979 || GET_CODE (op0) == ZERO_EXTEND)
2980 && CONST_INT_P (trueop1)
2981 && HWI_COMPUTABLE_MODE_P (mode)
2982 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2983 & UINTVAL (trueop1)) == 0)
2985 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2986 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2987 gen_int_mode (INTVAL (trueop1),
2988 imode));
2989 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
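/* Editorial worked example, not part of the original source: with X in
   QImode extended to SImode and C == 0x7f, every nonzero bit of C lies
   inside QImode's mask 0xff, so (and (sign_extend:SI X) (const_int 0x7f))
   becomes (zero_extend:SI (and:QI X (const_int 0x7f))).  */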
2992 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2993 we might be able to further simplify the AND with X and potentially
2994 remove the truncation altogether. */
2995 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2997 rtx x = XEXP (op0, 0);
2998 enum machine_mode xmode = GET_MODE (x);
2999 tem = simplify_gen_binary (AND, xmode, x,
3000 gen_int_mode (INTVAL (trueop1), xmode));
3001 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3004 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3005 if (GET_CODE (op0) == IOR
3006 && CONST_INT_P (trueop1)
3007 && CONST_INT_P (XEXP (op0, 1)))
3009 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3010 return simplify_gen_binary (IOR, mode,
3011 simplify_gen_binary (AND, mode,
3012 XEXP (op0, 0), op1),
3013 gen_int_mode (tmp, mode));
3016 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3017 insn (and may simplify more). */
3018 if (GET_CODE (op0) == XOR
3019 && rtx_equal_p (XEXP (op0, 0), op1)
3020 && ! side_effects_p (op1))
3021 return simplify_gen_binary (AND, mode,
3022 simplify_gen_unary (NOT, mode,
3023 XEXP (op0, 1), mode),
3024 op1);
3026 if (GET_CODE (op0) == XOR
3027 && rtx_equal_p (XEXP (op0, 1), op1)
3028 && ! side_effects_p (op1))
3029 return simplify_gen_binary (AND, mode,
3030 simplify_gen_unary (NOT, mode,
3031 XEXP (op0, 0), mode),
3032 op1);
3034 /* Similarly for (~(A ^ B)) & A. */
3035 if (GET_CODE (op0) == NOT
3036 && GET_CODE (XEXP (op0, 0)) == XOR
3037 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3038 && ! side_effects_p (op1))
3039 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3041 if (GET_CODE (op0) == NOT
3042 && GET_CODE (XEXP (op0, 0)) == XOR
3043 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3044 && ! side_effects_p (op1))
3045 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3047 /* Convert (A | B) & A to A. */
3048 if (GET_CODE (op0) == IOR
3049 && (rtx_equal_p (XEXP (op0, 0), op1)
3050 || rtx_equal_p (XEXP (op0, 1), op1))
3051 && ! side_effects_p (XEXP (op0, 0))
3052 && ! side_effects_p (XEXP (op0, 1)))
3053 return op1;
3055 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3056 ((A & N) + B) & M -> (A + B) & M
3057 Similarly if (N & M) == 0,
3058 ((A | N) + B) & M -> (A + B) & M
3059 and for - instead of + and/or ^ instead of |.
3060 Also, if (N & M) == 0, then
3061 (A +- N) & M -> A & M. */
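/* Editorial worked example, not part of the original source: take
   M == 0xff (so M + 1 is a power of two) and N == 0x100, giving
   (N & M) == 0.  Then ((A | 0x100) + B) & 0xff == (A + B) & 0xff,
   because bit 8 of an addend can only influence bits 8 and above of
   the sum, never bits 0-7 that M keeps.  */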
3062 if (CONST_INT_P (trueop1)
3063 && HWI_COMPUTABLE_MODE_P (mode)
3064 && ~UINTVAL (trueop1)
3065 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3066 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3068 rtx pmop[2];
3069 int which;
3071 pmop[0] = XEXP (op0, 0);
3072 pmop[1] = XEXP (op0, 1);
3074 if (CONST_INT_P (pmop[1])
3075 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3076 return simplify_gen_binary (AND, mode, pmop[0], op1);
3078 for (which = 0; which < 2; which++)
3080 tem = pmop[which];
3081 switch (GET_CODE (tem))
3083 case AND:
3084 if (CONST_INT_P (XEXP (tem, 1))
3085 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3086 == UINTVAL (trueop1))
3087 pmop[which] = XEXP (tem, 0);
3088 break;
3089 case IOR:
3090 case XOR:
3091 if (CONST_INT_P (XEXP (tem, 1))
3092 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3093 pmop[which] = XEXP (tem, 0);
3094 break;
3095 default:
3096 break;
3100 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3102 tem = simplify_gen_binary (GET_CODE (op0), mode,
3103 pmop[0], pmop[1]);
3104 return simplify_gen_binary (code, mode, tem, op1);
3108 /* (and X (ior (not X) Y)) -> (and X Y) */
3109 if (GET_CODE (op1) == IOR
3110 && GET_CODE (XEXP (op1, 0)) == NOT
3111 && op0 == XEXP (XEXP (op1, 0), 0))
3112 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3114 /* (and (ior (not X) Y) X) -> (and X Y) */
3115 if (GET_CODE (op0) == IOR
3116 && GET_CODE (XEXP (op0, 0)) == NOT
3117 && op1 == XEXP (XEXP (op0, 0), 0))
3118 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3120 tem = simplify_associative_operation (code, mode, op0, op1);
3121 if (tem)
3122 return tem;
3123 break;
3125 case UDIV:
3126 /* 0/x is 0 (or x&0 if x has side-effects). */
3127 if (trueop0 == CONST0_RTX (mode))
3129 if (side_effects_p (op1))
3130 return simplify_gen_binary (AND, mode, op1, trueop0);
3131 return trueop0;
3133 /* x/1 is x.  */
3134 if (trueop1 == CONST1_RTX (mode))
3136 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3137 if (tem)
3138 return tem;
3140 /* Convert divide by power of two into shift. */
3141 if (CONST_INT_P (trueop1)
3142 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3143 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
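/* Editorial worked example, not part of the original source: for the
   unsigned division above, x / 8 becomes (lshiftrt x 3); e.g.
   29 / 8 == 3 == 29 >> 3.  */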
3147 /* Handle floating point and integers separately. */
3148 if (SCALAR_FLOAT_MODE_P (mode))
3150 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3151 safe for modes with NaNs, since 0.0 / 0.0 will then be
3152 NaN rather than 0.0. Nor is it safe for modes with signed
3153 zeros, since dividing 0 by a negative number gives -0.0 */
3154 if (trueop0 == CONST0_RTX (mode)
3155 && !HONOR_NANS (mode)
3156 && !HONOR_SIGNED_ZEROS (mode)
3157 && ! side_effects_p (op1))
3158 return op0;
3159 /* x/1.0 is x.  */
3160 if (trueop1 == CONST1_RTX (mode)
3161 && !HONOR_SNANS (mode))
3162 return op0;
3164 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3165 && trueop1 != CONST0_RTX (mode))
3167 REAL_VALUE_TYPE d;
3168 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3170 /* x/-1.0 is -x.  */
3171 if (REAL_VALUES_EQUAL (d, dconstm1)
3172 && !HONOR_SNANS (mode))
3173 return simplify_gen_unary (NEG, mode, op0, mode);
3175 /* Change FP division by a constant into multiplication.
3176 Only do this with -freciprocal-math. */
3177 if (flag_reciprocal_math
3178 && !REAL_VALUES_EQUAL (d, dconst0))
3180 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3181 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3182 return simplify_gen_binary (MULT, mode, op0, tem);
3186 else if (SCALAR_INT_MODE_P (mode))
3188 /* 0/x is 0 (or x&0 if x has side-effects). */
3189 if (trueop0 == CONST0_RTX (mode)
3190 && !cfun->can_throw_non_call_exceptions)
3192 if (side_effects_p (op1))
3193 return simplify_gen_binary (AND, mode, op1, trueop0);
3194 return trueop0;
3196 /* x/1 is x.  */
3197 if (trueop1 == CONST1_RTX (mode))
3199 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3200 if (tem)
3201 return tem;
3203 /* x/-1 is -x.  */
3204 if (trueop1 == constm1_rtx)
3206 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3207 if (x)
3208 return simplify_gen_unary (NEG, mode, x, mode);
3214 /* 0%x is 0 (or x&0 if x has side-effects). */
3215 if (trueop0 == CONST0_RTX (mode))
3217 if (side_effects_p (op1))
3218 return simplify_gen_binary (AND, mode, op1, trueop0);
3219 return trueop0;
3221 /* x%1 is 0 (or x&0 if x has side-effects).  */
3222 if (trueop1 == CONST1_RTX (mode))
3224 if (side_effects_p (op0))
3225 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3226 return CONST0_RTX (mode);
3228 /* Implement modulus by power of two as AND. */
3229 if (CONST_INT_P (trueop1)
3230 && exact_log2 (UINTVAL (trueop1)) > 0)
3231 return simplify_gen_binary (AND, mode, op0,
3232 GEN_INT (INTVAL (op1) - 1));
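/* Editorial worked example, not part of the original source: unsigned
   x % 8 becomes x & 7; e.g. 29 % 8 == 5 == 29 & 7
   (11101 & 00111 == 00101).  */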
3236 /* 0%x is 0 (or x&0 if x has side-effects). */
3237 if (trueop0 == CONST0_RTX (mode))
3239 if (side_effects_p (op1))
3240 return simplify_gen_binary (AND, mode, op1, trueop0);
3241 return trueop0;
3243 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3244 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3246 if (side_effects_p (op0))
3247 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3248 return CONST0_RTX (mode);
3255 if (trueop1 == CONST0_RTX (mode))
3256 return op0;
3257 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3258 return op0;
3259 /* Rotating ~0 always results in ~0. */
3260 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3261 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3262 && ! side_effects_p (op1))
3263 return op0;
3264 canonicalize_shift:
3265 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3267 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3268 if (val != INTVAL (op1))
3269 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3276 if (trueop1 == CONST0_RTX (mode))
3277 return op0;
3278 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3279 return op0;
3280 goto canonicalize_shift;
3283 if (trueop1 == CONST0_RTX (mode))
3284 return op0;
3285 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3286 return op0;
3287 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3288 if (GET_CODE (op0) == CLZ
3289 && CONST_INT_P (trueop1)
3290 && STORE_FLAG_VALUE == 1
3291 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3293 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3294 unsigned HOST_WIDE_INT zero_val = 0;
3296 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3297 && zero_val == GET_MODE_PRECISION (imode)
3298 && INTVAL (trueop1) == exact_log2 (zero_val))
3299 return simplify_gen_relational (EQ, mode, imode,
3300 XEXP (op0, 0), const0_rtx);
3302 goto canonicalize_shift;
3305 if (width <= HOST_BITS_PER_WIDE_INT
3306 && mode_signbit_p (mode, trueop1)
3307 && ! side_effects_p (op0))
3308 return op1;
3309 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3310 return op0;
3311 tem = simplify_associative_operation (code, mode, op0, op1);
3312 if (tem)
3313 return tem;
3314 break;
3316 case SMAX:
3317 if (width <= HOST_BITS_PER_WIDE_INT
3318 && CONST_INT_P (trueop1)
3319 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3320 && ! side_effects_p (op0))
3321 return op1;
3322 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3323 return op0;
3324 tem = simplify_associative_operation (code, mode, op0, op1);
3325 if (tem)
3326 return tem;
3327 break;
3329 case UMIN:
3330 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3331 return op1;
3332 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3333 return op0;
3334 tem = simplify_associative_operation (code, mode, op0, op1);
3335 if (tem)
3336 return tem;
3337 break;
3339 case UMAX:
3340 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3341 return op1;
3342 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3343 return op0;
3344 tem = simplify_associative_operation (code, mode, op0, op1);
3345 if (tem)
3346 return tem;
3347 break;
3357 /* ??? There are simplifications that can be done.  */
3358 return 0;
3360 case VEC_SELECT:
3361 if (!VECTOR_MODE_P (mode))
3363 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3364 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3365 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3366 gcc_assert (XVECLEN (trueop1, 0) == 1);
3367 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3369 if (GET_CODE (trueop0) == CONST_VECTOR)
3370 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3371 (trueop1, 0, 0)));
3373 /* Extract a scalar element from a nested VEC_SELECT expression
3374 (with optional nested VEC_CONCAT expression). Some targets
3375 (i386) extract scalar element from a vector using chain of
3376 nested VEC_SELECT expressions. When input operand is a memory
3377 operand, this operation can be simplified to a simple scalar
3378 load from a suitably offset memory address.  */
3379 if (GET_CODE (trueop0) == VEC_SELECT)
3381 rtx op0 = XEXP (trueop0, 0);
3382 rtx op1 = XEXP (trueop0, 1);
3384 enum machine_mode opmode = GET_MODE (op0);
3385 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3386 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3388 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3389 int elem;
3391 rtvec vec;
3392 rtx tmp_op, tmp;
3394 gcc_assert (GET_CODE (op1) == PARALLEL);
3395 gcc_assert (i < n_elts);
3397 /* Select element, pointed by nested selector. */
3398 elem = INTVAL (XVECEXP (op1, 0, i));
3400 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3401 if (GET_CODE (op0) == VEC_CONCAT)
3403 rtx op00 = XEXP (op0, 0);
3404 rtx op01 = XEXP (op0, 1);
3406 enum machine_mode mode00, mode01;
3407 int n_elts00, n_elts01;
3409 mode00 = GET_MODE (op00);
3410 mode01 = GET_MODE (op01);
3412 /* Find out number of elements of each operand. */
3413 if (VECTOR_MODE_P (mode00))
3415 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3416 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3418 else
3419 n_elts00 = 1;
3421 if (VECTOR_MODE_P (mode01))
3423 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3424 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3426 else
3427 n_elts01 = 1;
3429 gcc_assert (n_elts == n_elts00 + n_elts01);
3431 /* Select correct operand of VEC_CONCAT
3432 and adjust selector. */
3433 if (elem < n_elts01)
3444 vec = rtvec_alloc (1);
3445 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3447 tmp = gen_rtx_fmt_ee (code, mode,
3448 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3449 return tmp;
3451 if (GET_CODE (trueop0) == VEC_DUPLICATE
3452 && GET_MODE (XEXP (trueop0, 0)) == mode)
3453 return XEXP (trueop0, 0);
3457 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3458 gcc_assert (GET_MODE_INNER (mode)
3459 == GET_MODE_INNER (GET_MODE (trueop0)));
3460 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3462 if (GET_CODE (trueop0) == CONST_VECTOR)
3464 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3465 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3466 rtvec v = rtvec_alloc (n_elts);
3469 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3470 for (i = 0; i < n_elts; i++)
3472 rtx x = XVECEXP (trueop1, 0, i);
3474 gcc_assert (CONST_INT_P (x));
3475 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3476 INTVAL (x));
3479 return gen_rtx_CONST_VECTOR (mode, v);
3482 /* Recognize the identity. */
3483 if (GET_MODE (trueop0) == mode)
3485 bool maybe_ident = true;
3486 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3488 rtx j = XVECEXP (trueop1, 0, i);
3489 if (!CONST_INT_P (j) || INTVAL (j) != i)
3491 maybe_ident = false;
3492 break;
3495 if (maybe_ident)
3496 return trueop0;
3499 /* If we build {a,b} then permute it, build the result directly. */
3500 if (XVECLEN (trueop1, 0) == 2
3501 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3502 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3503 && GET_CODE (trueop0) == VEC_CONCAT
3504 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3505 && GET_MODE (XEXP (trueop0, 0)) == mode
3506 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3507 && GET_MODE (XEXP (trueop0, 1)) == mode)
3509 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3510 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3513 gcc_assert (i0 < 4 && i1 < 4);
3514 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3515 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3517 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3520 if (XVECLEN (trueop1, 0) == 2
3521 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3522 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3523 && GET_CODE (trueop0) == VEC_CONCAT
3524 && GET_MODE (trueop0) == mode)
3526 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3527 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3530 gcc_assert (i0 < 2 && i1 < 2);
3531 subop0 = XEXP (trueop0, i0);
3532 subop1 = XEXP (trueop0, i1);
3534 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3538 if (XVECLEN (trueop1, 0) == 1
3539 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3540 && GET_CODE (trueop0) == VEC_CONCAT)
3542 rtx vec = trueop0;
3543 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3545 /* Try to find the element in the VEC_CONCAT. */
3546 while (GET_MODE (vec) != mode
3547 && GET_CODE (vec) == VEC_CONCAT)
3549 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3550 if (offset < vec_size)
3551 vec = XEXP (vec, 0);
3552 else
3554 offset -= vec_size;
3555 vec = XEXP (vec, 1);
3557 vec = avoid_constant_pool_reference (vec);
3560 if (GET_MODE (vec) == mode)
3561 return vec;
3567 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3568 ? GET_MODE (trueop0)
3569 : GET_MODE_INNER (mode));
3570 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3571 ? GET_MODE (trueop1)
3572 : GET_MODE_INNER (mode));
3574 gcc_assert (VECTOR_MODE_P (mode));
3575 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3576 == GET_MODE_SIZE (mode));
3578 if (VECTOR_MODE_P (op0_mode))
3579 gcc_assert (GET_MODE_INNER (mode)
3580 == GET_MODE_INNER (op0_mode));
3582 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3584 if (VECTOR_MODE_P (op1_mode))
3585 gcc_assert (GET_MODE_INNER (mode)
3586 == GET_MODE_INNER (op1_mode));
3588 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3590 if ((GET_CODE (trueop0) == CONST_VECTOR
3591 || CONST_SCALAR_INT_P (trueop0)
3592 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3593 && (GET_CODE (trueop1) == CONST_VECTOR
3594 || CONST_SCALAR_INT_P (trueop1)
3595 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3597 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3598 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3599 rtvec v = rtvec_alloc (n_elts);
3601 unsigned in_n_elts = 1;
3603 if (VECTOR_MODE_P (op0_mode))
3604 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3605 for (i = 0; i < n_elts; i++)
3607 if (i < in_n_elts)
3609 if (!VECTOR_MODE_P (op0_mode))
3610 RTVEC_ELT (v, i) = trueop0;
3611 else
3612 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3614 else
3616 if (!VECTOR_MODE_P (op1_mode))
3617 RTVEC_ELT (v, i) = trueop1;
3618 else
3619 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3620 i - in_n_elts);
3624 return gen_rtx_CONST_VECTOR (mode, v);
3627 /* Try to merge VEC_SELECTs from the same vector into a single one. */
3628 if (GET_CODE (trueop0) == VEC_SELECT
3629 && GET_CODE (trueop1) == VEC_SELECT
3630 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0)))
3632 rtx par0 = XEXP (trueop0, 1);
3633 rtx par1 = XEXP (trueop1, 1);
3634 int len0 = XVECLEN (par0, 0);
3635 int len1 = XVECLEN (par1, 0);
3636 rtvec vec = rtvec_alloc (len0 + len1);
3637 for (int i = 0; i < len0; i++)
3638 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3639 for (int i = 0; i < len1; i++)
3640 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3641 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3642 gen_rtx_PARALLEL (VOIDmode, vec));
3654 rtx
3655 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3656 rtx op0, rtx op1)
3658 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3659 HOST_WIDE_INT val;
3660 unsigned int width = GET_MODE_PRECISION (mode);
3662 if (VECTOR_MODE_P (mode)
3663 && code != VEC_CONCAT
3664 && GET_CODE (op0) == CONST_VECTOR
3665 && GET_CODE (op1) == CONST_VECTOR)
3667 unsigned n_elts = GET_MODE_NUNITS (mode);
3668 enum machine_mode op0mode = GET_MODE (op0);
3669 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3670 enum machine_mode op1mode = GET_MODE (op1);
3671 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3672 rtvec v = rtvec_alloc (n_elts);
3675 gcc_assert (op0_n_elts == n_elts);
3676 gcc_assert (op1_n_elts == n_elts);
3677 for (i = 0; i < n_elts; i++)
3679 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3680 CONST_VECTOR_ELT (op0, i),
3681 CONST_VECTOR_ELT (op1, i));
3682 if (!x)
3683 return 0;
3684 RTVEC_ELT (v, i) = x;
3687 return gen_rtx_CONST_VECTOR (mode, v);
3690 if (VECTOR_MODE_P (mode)
3691 && code == VEC_CONCAT
3692 && (CONST_SCALAR_INT_P (op0)
3693 || GET_CODE (op0) == CONST_FIXED
3694 || CONST_DOUBLE_AS_FLOAT_P (op0))
3695 && (CONST_SCALAR_INT_P (op1)
3696 || CONST_DOUBLE_AS_FLOAT_P (op1)
3697 || GET_CODE (op1) == CONST_FIXED))
3699 unsigned n_elts = GET_MODE_NUNITS (mode);
3700 rtvec v = rtvec_alloc (n_elts);
3702 gcc_assert (n_elts >= 2);
3705 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3706 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3708 RTVEC_ELT (v, 0) = op0;
3709 RTVEC_ELT (v, 1) = op1;
3713 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3714 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3717 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3718 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3719 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3721 for (i = 0; i < op0_n_elts; ++i)
3722 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3723 for (i = 0; i < op1_n_elts; ++i)
3724 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3727 return gen_rtx_CONST_VECTOR (mode, v);
3730 if (SCALAR_FLOAT_MODE_P (mode)
3731 && CONST_DOUBLE_AS_FLOAT_P (op0)
3732 && CONST_DOUBLE_AS_FLOAT_P (op1)
3733 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3744 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3745 GET_MODE (op0));
3746 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3747 GET_MODE (op1));
3748 for (i = 0; i < 4; i++)
3765 real_from_target (&r, tmp0, mode);
3766 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3770 REAL_VALUE_TYPE f0, f1, value, result;
3771 bool inexact;
3773 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3774 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3775 real_convert (&f0, mode, &f0);
3776 real_convert (&f1, mode, &f1);
3778 if (HONOR_SNANS (mode)
3779 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3780 return 0;
3782 if (code == DIV
3783 && REAL_VALUES_EQUAL (f1, dconst0)
3784 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3785 return 0;
3787 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3788 && flag_trapping_math
3789 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3791 int s0 = REAL_VALUE_NEGATIVE (f0);
3792 int s1 = REAL_VALUE_NEGATIVE (f1);
3794 switch (code)
3796 case PLUS:
3797 /* Inf + -Inf = NaN plus exception.  */
3798 if (s0 != s1)
3799 return 0;
3800 break;
3801 case MINUS:
3802 /* Inf - Inf = NaN plus exception.  */
3803 if (s0 == s1)
3804 return 0;
3805 break;
3806 case DIV:
3807 /* Inf / Inf = NaN plus exception.  */
3808 return 0;
3809 default:
3810 break;
3814 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3815 && flag_trapping_math
3816 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3817 || (REAL_VALUE_ISINF (f1)
3818 && REAL_VALUES_EQUAL (f0, dconst0))))
3819 /* Inf * 0 = NaN plus exception.  */
3820 return 0;
3822 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3823 &f0, &f1);
3824 real_convert (&result, mode, &value);
3826 /* Don't constant fold this floating point operation if
3827 the result has overflowed and flag_trapping_math. */
3829 if (flag_trapping_math
3830 && MODE_HAS_INFINITIES (mode)
3831 && REAL_VALUE_ISINF (result)
3832 && !REAL_VALUE_ISINF (f0)
3833 && !REAL_VALUE_ISINF (f1))
3834 /* Overflow plus exception.  */
3835 return 0;
3837 /* Don't constant fold this floating point operation if the
3838 result may depend upon the run-time rounding mode and
3839 flag_rounding_math is set, or if GCC's software emulation
3840 is unable to accurately represent the result. */
3842 if ((flag_rounding_math
3843 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3844 && (inexact || !real_identical (&result, &value)))
3845 return 0;
3847 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3851 /* We can fold some multi-word operations. */
3852 if (GET_MODE_CLASS (mode) == MODE_INT
3853 && width == HOST_BITS_PER_DOUBLE_INT
3854 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3855 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3857 double_int o0, o1, res, tmp;
3858 bool overflow;
3860 o0 = rtx_to_double_int (op0);
3861 o1 = rtx_to_double_int (op1);
3863 switch (code)
3865 case MINUS:
3866 /* A - B == A + (-B).  */
3867 o1 = -o1;
3869 /* Fall through....  */
3871 case PLUS:
3872 res = o0 + o1;
3873 break;
3875 case MULT:
3876 res = o0 * o1;
3877 break;
3879 case DIV:
3880 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3881 &tmp, &overflow);
3882 if (overflow)
3883 return 0;
3884 break;
3886 case MOD:
3887 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3888 &res, &overflow);
3889 if (overflow)
3890 return 0;
3891 break;
3893 case UDIV:
3894 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3895 &tmp, &overflow);
3896 if (overflow)
3897 return 0;
3898 break;
3900 case UMOD:
3901 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3902 &res, &overflow);
3903 if (overflow)
3904 return 0;
3905 break;
3907 case AND:
3908 res = o0 & o1;
3909 break;
3911 case IOR:
3912 res = o0 | o1;
3913 break;
3915 case XOR:
3916 res = o0 ^ o1;
3917 break;
3919 case SMIN:
3920 res = o0.smin (o1);
3921 break;
3923 case SMAX:
3924 res = o0.smax (o1);
3925 break;
3927 case UMIN:
3928 res = o0.umin (o1);
3929 break;
3931 case UMAX:
3932 res = o0.umax (o1);
3933 break;
3935 case LSHIFTRT: case ASHIFTRT:
3937 case ROTATE: case ROTATERT:
3939 unsigned HOST_WIDE_INT cnt;
3941 if (SHIFT_COUNT_TRUNCATED)
3943 o1.high = 0;
3944 o1.low &= GET_MODE_PRECISION (mode) - 1;
3947 if (!o1.fits_uhwi ()
3948 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3949 return 0;
3951 cnt = o1.to_uhwi ();
3952 unsigned short prec = GET_MODE_PRECISION (mode);
3954 if (code == LSHIFTRT || code == ASHIFTRT)
3955 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3956 else if (code == ASHIFT)
3957 res = o0.alshift (cnt, prec);
3958 else if (code == ROTATE)
3959 res = o0.lrotate (cnt, prec);
3960 else /* code == ROTATERT */
3961 res = o0.rrotate (cnt, prec);
3963 break;
3965 default:
3966 gcc_unreachable ();
3969 return immed_double_int_const (res, mode);
3972 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3973 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3975 /* Get the integer argument values in two forms:
3976 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3978 arg0 = INTVAL (op0);
3979 arg1 = INTVAL (op1);
3981 if (width < HOST_BITS_PER_WIDE_INT)
3983 arg0 &= GET_MODE_MASK (mode);
3984 arg1 &= GET_MODE_MASK (mode);
3986 arg0s = arg0;
3987 if (val_signbit_known_set_p (mode, arg0s))
3988 arg0s |= ~GET_MODE_MASK (mode);
3990 arg1s = arg1;
3991 if (val_signbit_known_set_p (mode, arg1s))
3992 arg1s |= ~GET_MODE_MASK (mode);
3994 else
3996 arg0s = arg0;
3997 arg1s = arg1;
4000 /* Compute the value of the arithmetic. */
4002 switch (code)
4004 case PLUS:
4005 val = arg0s + arg1s;
4006 break;
4008 case MINUS:
4009 val = arg0s - arg1s;
4010 break;
4012 case MULT:
4013 val = arg0s * arg1s;
4014 break;
4016 case DIV:
4017 if (arg1s == 0
4018 || ((unsigned HOST_WIDE_INT) arg0s
4019 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4020 && arg1s == -1))
4021 return 0;
4022 val = arg0s / arg1s;
4023 break;
4025 case MOD:
4026 if (arg1s == 0
4027 || ((unsigned HOST_WIDE_INT) arg0s
4028 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4029 && arg1s == -1))
4030 return 0;
4031 val = arg0s % arg1s;
4032 break;
4034 case UDIV:
4035 if (arg1s == 0
4036 || ((unsigned HOST_WIDE_INT) arg0s
4037 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4038 && arg1s == -1))
4039 return 0;
4040 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4041 break;
4043 case UMOD:
4044 if (arg1s == 0
4045 || ((unsigned HOST_WIDE_INT) arg0s
4046 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4047 && arg1s == -1))
4048 return 0;
4049 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4050 break;
4052 case AND:
4053 val = arg0 & arg1;
4054 break;
4056 case IOR:
4057 val = arg0 | arg1;
4058 break;
4060 case XOR:
4061 val = arg0 ^ arg1;
4062 break;
4064 case LSHIFTRT:
4065 case ASHIFT:
4066 case ASHIFTRT:
4067 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4068 the value is in range. We can't return any old value for
4069 out-of-range arguments because either the middle-end (via
4070 shift_truncation_mask) or the back-end might be relying on
4071 target-specific knowledge. Nor can we rely on
4072 shift_truncation_mask, since the shift might not be part of an
4073 ashlM3, lshrM3 or ashrM3 instruction. */
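/* Editorial note, not part of the original source: on a
   SHIFT_COUNT_TRUNCATED target, a shift of a 32-bit value by 33 behaves
   like a shift by 33 % 32 == 1, so the fold below reduces the count;
   without that guarantee an out-of-range count cannot be folded and 0
   is returned instead.  */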
4074 if (SHIFT_COUNT_TRUNCATED)
4075 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4076 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4077 return 0;
4079 val = (code == ASHIFT
4080 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4081 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4083 /* Sign-extend the result for arithmetic right shifts. */
4084 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4085 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4086 break;
4088 case ROTATERT:
4089 if (arg1 < 0)
4090 return 0;
4092 arg1 %= width;
4093 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4094 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4095 break;
4097 case ROTATE:
4098 if (arg1 < 0)
4099 return 0;
4101 arg1 %= width;
4102 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4103 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4104 break;
4106 case COMPARE:
4107 /* Do nothing here.  */
4108 return 0;
4110 case SMIN:
4111 val = arg0s <= arg1s ? arg0s : arg1s;
4112 break;
4114 case UMIN:
4115 val = ((unsigned HOST_WIDE_INT) arg0
4116 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4117 break;
4119 case SMAX:
4120 val = arg0s > arg1s ? arg0s : arg1s;
4121 break;
4123 case UMAX:
4124 val = ((unsigned HOST_WIDE_INT) arg0
4125 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4138 /* ??? There are simplifications that can be done.  */
4139 return 0;
4141 default:
4142 gcc_unreachable ();
4145 return gen_int_mode (val, mode);
4153 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4154 PLUS or MINUS.
4156 Rather than testing for specific cases, we do this by a brute-force method
4157 and do all possible simplifications until no more changes occur. Then
4158 we rebuild the operation. */
4160 struct simplify_plus_minus_op_data
4162 rtx op;
4163 short neg;
4164 };
4166 static bool
4167 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4169 int result;
4171 result = (commutative_operand_precedence (y)
4172 - commutative_operand_precedence (x));
4173 if (result)
4174 return result > 0;
4176 /* Group together equal REGs to do more simplification. */
4177 if (REG_P (x) && REG_P (y))
4178 return REGNO (x) > REGNO (y);
4179 else
4180 return false;
4183 static rtx
4184 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4185 rtx op1)
4187 struct simplify_plus_minus_op_data ops[8];
4188 rtx result, tem;
4189 int n_ops = 2, input_ops = 2;
4190 int changed, n_constants = 0, canonicalized = 0;
4191 int i, j;
4193 memset (ops, 0, sizeof ops);
4195 /* Set up the two operands and then expand them until nothing has been
4196 changed. If we run out of room in our array, give up; this should
4197 almost never happen. */
4199 ops[0].op = op0;
4200 ops[0].neg = 0;
4201 ops[1].op = op1;
4202 ops[1].neg = (code == MINUS);
4204 do
4206 changed = 0;
4208 for (i = 0; i < n_ops; i++)
4210 rtx this_op = ops[i].op;
4211 int this_neg = ops[i].neg;
4212 enum rtx_code this_code = GET_CODE (this_op);
4214 switch (this_code)
4216 case PLUS:
4217 case MINUS:
4218 if (n_ops == 7)
4219 return NULL_RTX;
4221 ops[n_ops].op = XEXP (this_op, 1);
4222 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4223 n_ops++;
4225 ops[i].op = XEXP (this_op, 0);
4226 input_ops++;
4227 changed = 1;
4228 canonicalized |= this_neg;
4229 break;
4231 case NEG:
4232 ops[i].op = XEXP (this_op, 0);
4233 ops[i].neg = ! this_neg;
4234 changed = 1;
4235 canonicalized = 1;
4236 break;
4238 case CONST:
4239 if (n_ops < 7
4240 && GET_CODE (XEXP (this_op, 0)) == PLUS
4241 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4242 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4244 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4245 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4246 ops[n_ops].neg = this_neg;
4247 n_ops++;
4248 changed = 1;
4249 canonicalized = 1;
4250 break;
4252 case NOT:
4254 /* ~a -> (-a - 1) */
4255 if (n_ops != 7)
4257 ops[n_ops].op = CONSTM1_RTX (mode);
4258 ops[n_ops++].neg = this_neg;
4259 ops[i].op = XEXP (this_op, 0);
4260 ops[i].neg = !this_neg;
4261 changed = 1;
4262 canonicalized = 1;
4264 break;
4266 case CONST_INT:
4267 n_constants++;
4268 if (this_neg)
4270 ops[i].op = neg_const_int (mode, this_op);
4271 ops[i].neg = 0;
4272 changed = 1;
4273 canonicalized = 1;
4275 break;
4277 default:
4278 break;
4282 while (changed);
4284 if (n_constants > 1)
4285 canonicalized = 1;
4287 gcc_assert (n_ops >= 2);
4289 /* If we only have two operands, we can avoid the loops. */
4290 if (n_ops == 2)
4292 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4293 rtx lhs, rhs;
4295 /* Get the two operands. Be careful with the order, especially for
4296 the cases where code == MINUS. */
4297 if (ops[0].neg && ops[1].neg)
4299 lhs = gen_rtx_NEG (mode, ops[0].op);
4300 rhs = ops[1].op;
4302 else if (ops[0].neg)
4304 lhs = ops[1].op;
4305 rhs = ops[0].op;
4307 else
4309 lhs = ops[0].op;
4310 rhs = ops[1].op;
4313 return simplify_const_binary_operation (code, mode, lhs, rhs);
4316 /* Now simplify each pair of operands until nothing changes. */
4319 /* Insertion sort is good enough for an eight-element array. */
4320 for (i = 1; i < n_ops; i++)
4322 struct simplify_plus_minus_op_data save;
4323 j = i - 1;
4324 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4325 continue;
4327 canonicalized = 1;
4328 save = ops[i];
4329 do
4330 ops[j + 1] = ops[j];
4331 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4332 ops[j + 1] = save;
4335 changed = 0;
4336 for (i = n_ops - 1; i > 0; i--)
4337 for (j = i - 1; j >= 0; j--)
4339 rtx lhs = ops[j].op, rhs = ops[i].op;
4340 int lneg = ops[j].neg, rneg = ops[i].neg;
4342 if (lhs != 0 && rhs != 0)
4344 enum rtx_code ncode = PLUS;
4346 if (lneg != rneg)
4348 ncode = MINUS;
4349 if (lneg)
4350 tem = lhs, lhs = rhs, rhs = tem;
4352 else if (swap_commutative_operands_p (lhs, rhs))
4353 tem = lhs, lhs = rhs, rhs = tem;
4355 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4356 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4358 rtx tem_lhs, tem_rhs;
4360 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4361 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4362 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4364 if (tem && !CONSTANT_P (tem))
4365 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4367 else
4368 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4370 /* Reject "simplifications" that just wrap the two
4371 arguments in a CONST. Failure to do so can result
4372 in infinite recursion with simplify_binary_operation
4373 when it calls us to simplify CONST operations. */
4374 if (tem != 0
4375 && ! (GET_CODE (tem) == CONST
4376 && GET_CODE (XEXP (tem, 0)) == ncode
4377 && XEXP (XEXP (tem, 0), 0) == lhs
4378 && XEXP (XEXP (tem, 0), 1) == rhs))
4380 lneg &= rneg;
4381 if (GET_CODE (tem) == NEG)
4382 tem = XEXP (tem, 0), lneg = !lneg;
4383 if (CONST_INT_P (tem) && lneg)
4384 tem = neg_const_int (mode, tem), lneg = 0;
4386 ops[i].op = tem;
4387 ops[i].neg = lneg;
4388 ops[j].op = NULL_RTX;
4389 changed = 1;
4390 canonicalized = 1;
4394 while (changed);
4395 /* If nothing changed, fail.  */
4396 if (!canonicalized)
4397 return NULL_RTX;
4399 /* Pack all the operands to the lower-numbered entries. */
4400 for (i = 0, j = 0; j < n_ops; j++)
4401 if (ops[j].op)
4403 ops[i] = ops[j];
4404 i++;
4407 n_ops = i;
4410 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4411 if (n_ops == 2
4412 && CONST_INT_P (ops[1].op)
4413 && CONSTANT_P (ops[0].op)
4414 && ops[0].neg)
4415 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4417 /* We suppressed creation of trivial CONST expressions in the
4418 combination loop to avoid recursion. Create one manually now.
4419 The combination loop should have ensured that there is exactly
4420 one CONST_INT, and the sort will have ensured that it is last
4421 in the array and that any other constant will be next-to-last. */
4423 if (n_ops > 1
4424 && CONST_INT_P (ops[n_ops - 1].op)
4425 && CONSTANT_P (ops[n_ops - 2].op))
4427 rtx value = ops[n_ops - 1].op;
4428 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4429 value = neg_const_int (mode, value);
4430 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4431 INTVAL (value));
4432 n_ops--;
4435 /* Put a non-negated operand first, if possible. */
4437 for (i = 0; i < n_ops && ops[i].neg; i++)
4438 continue;
4439 if (i == n_ops)
4440 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4441 else if (i != 0)
4443 tem = ops[0].op;
4444 ops[0] = ops[i];
4445 ops[i].op = tem;
4446 ops[i].neg = !ops[0].neg;
4449 /* Now make the result by performing the requested operations. */
4450 result = ops[0].op;
4451 for (i = 1; i < n_ops; i++)
4452 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4453 mode, result, ops[i].op);
4455 return result;
4458 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4459 static bool
4460 plus_minus_operand_p (const_rtx x)
4462 return GET_CODE (x) == PLUS
4463 || GET_CODE (x) == MINUS
4464 || (GET_CODE (x) == CONST
4465 && GET_CODE (XEXP (x, 0)) == PLUS
4466 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4467 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4470 /* Like simplify_binary_operation except used for relational operators.
4471 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4472 not also be VOIDmode.
4474 CMP_MODE specifies the mode in which the comparison is done, so it is
4475 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4476 the operands or, if both are VOIDmode, the operands are compared in
4477 "infinite precision". */
4478 rtx
4479 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4480 enum machine_mode cmp_mode, rtx op0, rtx op1)
4482 rtx tem, trueop0, trueop1;
4484 if (cmp_mode == VOIDmode)
4485 cmp_mode = GET_MODE (op0);
4486 if (cmp_mode == VOIDmode)
4487 cmp_mode = GET_MODE (op1);
4489 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4490 if (tem)
4492 if (SCALAR_FLOAT_MODE_P (mode))
4494 if (tem == const0_rtx)
4495 return CONST0_RTX (mode);
4496 #ifdef FLOAT_STORE_FLAG_VALUE
4498 REAL_VALUE_TYPE val;
4499 val = FLOAT_STORE_FLAG_VALUE (mode);
4500 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4506 if (VECTOR_MODE_P (mode))
4508 if (tem == const0_rtx)
4509 return CONST0_RTX (mode);
4510 #ifdef VECTOR_STORE_FLAG_VALUE
4515 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4516 if (val == NULL_RTX)
4518 if (val == const1_rtx)
4519 return CONST1_RTX (mode);
4521 units = GET_MODE_NUNITS (mode);
4522 v = rtvec_alloc (units);
4523 for (i = 0; i < units; i++)
4524 RTVEC_ELT (v, i) = val;
4525 return gen_rtx_raw_CONST_VECTOR (mode, v);
4535 /* For the following tests, ensure const0_rtx is op1. */
4536 if (swap_commutative_operands_p (op0, op1)
4537 || (op0 == const0_rtx && op1 != const0_rtx))
4538 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4540 /* If op0 is a compare, extract the comparison arguments from it. */
4541 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4542 return simplify_gen_relational (code, mode, VOIDmode,
4543 XEXP (op0, 0), XEXP (op0, 1));
4545 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4549 trueop0 = avoid_constant_pool_reference (op0);
4550 trueop1 = avoid_constant_pool_reference (op1);
4551 return simplify_relational_operation_1 (code, mode, cmp_mode,
4555 /* This part of simplify_relational_operation is only used when CMP_MODE
4556 is not in class MODE_CC (i.e. it is a real comparison).
4558 MODE is the mode of the result, while CMP_MODE specifies the mode
4559 in which the comparison is done, so it is the mode of the operands. */
4562 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4563 enum machine_mode cmp_mode, rtx op0, rtx op1)
4565 enum rtx_code op0code = GET_CODE (op0);
4567 if (op1 == const0_rtx && COMPARISON_P (op0))
4569 /* If op0 is a comparison, extract the comparison arguments from it. */
4573 if (GET_MODE (op0) == mode)
4574 return simplify_rtx (op0);
4576 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4577 XEXP (op0, 0), XEXP (op0, 1));
4579 else if (code == EQ)
4581 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4582 if (new_code != UNKNOWN)
4583 return simplify_gen_relational (new_code, mode, VOIDmode,
4584 XEXP (op0, 0), XEXP (op0, 1));
4588 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4589 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4590 if ((code == LTU || code == GEU)
4591 && GET_CODE (op0) == PLUS
4592 && CONST_INT_P (XEXP (op0, 1))
4593 && (rtx_equal_p (op1, XEXP (op0, 0))
4594 || rtx_equal_p (op1, XEXP (op0, 1)))
4595 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4596 && XEXP (op0, 1) != const0_rtx)
4599 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4600 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4601 cmp_mode, XEXP (op0, 0), new_cmp);
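/* Worked example (illustrative): (ltu (plus a (const_int 4)) (const_int 4))
   is true exactly when a + 4 wrapped around, i.e. when a is one of the
   four largest unsigned values, so the clause above rewrites it as
   (geu a (const_int -4)). */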
4604 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4605 if ((code == LTU || code == GEU)
4606 && GET_CODE (op0) == PLUS
4607 && rtx_equal_p (op1, XEXP (op0, 1))
4608 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4609 && !rtx_equal_p (op1, XEXP (op0, 0)))
4610 return simplify_gen_relational (code, mode, cmp_mode, op0,
4611 copy_rtx (XEXP (op0, 0)));
4613 if (op1 == const0_rtx)
4615 /* Canonicalize (GTU x 0) as (NE x 0). */
4617 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4618 /* Canonicalize (LEU x 0) as (EQ x 0). */
4620 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4622 else if (op1 == const1_rtx)
4627 /* Canonicalize (GE x 1) as (GT x 0). */
4628 return simplify_gen_relational (GT, mode, cmp_mode,
4631 /* Canonicalize (GEU x 1) as (NE x 0). */
4632 return simplify_gen_relational (NE, mode, cmp_mode,
4635 /* Canonicalize (LT x 1) as (LE x 0). */
4636 return simplify_gen_relational (LE, mode, cmp_mode,
4639 /* Canonicalize (LTU x 1) as (EQ x 0). */
4640 return simplify_gen_relational (EQ, mode, cmp_mode,
4646 else if (op1 == constm1_rtx)
4648 /* Canonicalize (LE x -1) as (LT x 0). */
4650 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4651 /* Canonicalize (GT x -1) as (GE x 0). */
4653 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4656 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4657 if ((code == EQ || code == NE)
4658 && (op0code == PLUS || op0code == MINUS)
4660 && CONSTANT_P (XEXP (op0, 1))
4661 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4663 rtx x = XEXP (op0, 0);
4664 rtx c = XEXP (op0, 1);
4665 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4666 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4668 /* Detect an infinitely recursive condition, in which this
4669 simplification would oscillate between
4670 A + B == C <---> C - B == A,
4671 where A, B, and C are all constant but non-simplifiable expressions,
4672 usually SYMBOL_REFs. */
4673 if (GET_CODE (tem) == invcode
4675 && rtx_equal_p (c, XEXP (tem, 1)))
4678 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
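/* Worked example (illustrative): (eq (plus x (const_int 3)) (const_int 7))
   becomes (eq x (const_int 4)) because 7 - 3 folds. When the "constants"
   are SYMBOL_REFs the subtraction may not fold, and without the check
   above we would bounce between A + B == C and C - B == A forever. */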
4681 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4682 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4684 && op1 == const0_rtx
4685 && GET_MODE_CLASS (mode) == MODE_INT
4686 && cmp_mode != VOIDmode
4687 /* ??? Work-around BImode bugs in the ia64 backend. */
4689 && cmp_mode != BImode
4690 && nonzero_bits (op0, cmp_mode) == 1
4691 && STORE_FLAG_VALUE == 1)
4692 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4693 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4694 : lowpart_subreg (mode, op0, cmp_mode);
4696 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4697 if ((code == EQ || code == NE)
4698 && op1 == const0_rtx
4700 return simplify_gen_relational (code, mode, cmp_mode,
4701 XEXP (op0, 0), XEXP (op0, 1));
4703 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4704 if ((code == EQ || code == NE)
4706 && rtx_equal_p (XEXP (op0, 0), op1)
4707 && !side_effects_p (XEXP (op0, 0)))
4708 return simplify_gen_relational (code, mode, cmp_mode,
4709 XEXP (op0, 1), const0_rtx);
4711 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4712 if ((code == EQ || code == NE)
4714 && rtx_equal_p (XEXP (op0, 1), op1)
4715 && !side_effects_p (XEXP (op0, 1)))
4716 return simplify_gen_relational (code, mode, cmp_mode,
4717 XEXP (op0, 0), const0_rtx);
4719 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4720 if ((code == EQ || code == NE)
4722 && CONST_SCALAR_INT_P (op1)
4723 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4724 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4725 simplify_gen_binary (XOR, cmp_mode,
4726 XEXP (op0, 1), op1));
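/* Worked example (illustrative): (eq (xor x (const_int 5)) (const_int 3))
   becomes (eq x (const_int 6)), since x ^ 5 == 3 exactly when
   x == 5 ^ 3 == 6. */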
4728 if (op0code == POPCOUNT && op1 == const0_rtx)
4734 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4735 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4736 XEXP (op0, 0), const0_rtx);
4741 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4742 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4743 XEXP (op0, 0), const0_rtx);
4762 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4763 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4764 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4765 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4766 For floating-point comparisons, assume that the operands were ordered. */
4769 comparison_result (enum rtx_code code, int known_results)
4775 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4778 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4782 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4785 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4789 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4792 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4795 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4797 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4800 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4802 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4805 return const_true_rtx;
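/* Example use (illustrative): comparison_result (LEU, CMP_LT | CMP_LTU)
   yields const_true_rtx -- an operand known to be unsigned-less is in
   particular unsigned-less-or-equal -- while comparison_result (GTU, ...)
   on the same bits yields const0_rtx. */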
4813 /* Check if the given comparison (done in the given MODE) is actually a
4814 tautology or a contradiction.
4815 If no simplification is possible, this function returns zero.
4816 Otherwise, it returns either const_true_rtx or const0_rtx. */
4819 simplify_const_relational_operation (enum rtx_code code,
4820 enum machine_mode mode,
4827 gcc_assert (mode != VOIDmode
4828 || (GET_MODE (op0) == VOIDmode
4829 && GET_MODE (op1) == VOIDmode));
4831 /* If op0 is a compare, extract the comparison arguments from it. */
4832 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4834 op1 = XEXP (op0, 1);
4835 op0 = XEXP (op0, 0);
4837 if (GET_MODE (op0) != VOIDmode)
4838 mode = GET_MODE (op0);
4839 else if (GET_MODE (op1) != VOIDmode)
4840 mode = GET_MODE (op1);
4845 /* We can't simplify MODE_CC values since we don't know what the
4846 actual comparison is. */
4847 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4850 /* Make sure the constant is second. */
4851 if (swap_commutative_operands_p (op0, op1))
4853 tem = op0, op0 = op1, op1 = tem;
4854 code = swap_condition (code);
4857 trueop0 = avoid_constant_pool_reference (op0);
4858 trueop1 = avoid_constant_pool_reference (op1);
4860 /* For integer comparisons of A and B, we may be able to simplify A - B
4861 and then simplify a comparison of that with zero. If A and B are both either
4862 a register or a CONST_INT, this can't help; testing for these cases will
4863 prevent infinite recursion here and speed things up.
4865 We can only do this for EQ and NE comparisons, as otherwise we may
4866 lose or introduce overflow that we cannot disregard as undefined,
4867 since we do not know the signedness of the operation on either the
4868 left or the right hand side of the comparison. */
4870 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4871 && (code == EQ || code == NE)
4872 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4873 && (REG_P (op1) || CONST_INT_P (trueop1)))
4874 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4875 /* We cannot do this if tem is a nonzero address. */
4876 && ! nonzero_address_p (tem))
4877 return simplify_const_relational_operation (signed_condition (code),
4878 mode, tem, const0_rtx);
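/* Worked example (illustrative): comparing (plus x (const_int 1)) EQ
   (plus x (const_int 2)): neither side is a REG or CONST_INT, their
   MINUS simplifies to (const_int -1), and the recursive call folds
   (eq (const_int -1) (const_int 0)) to const0_rtx. */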
4880 if (! HONOR_NANS (mode) && code == ORDERED)
4881 return const_true_rtx;
4883 if (! HONOR_NANS (mode) && code == UNORDERED)
4886 /* For modes without NaNs, if the two operands are equal, we know the
4887 result except if they have side-effects. Even with NaNs we know
4888 the result of unordered comparisons and, if signaling NaNs are
4889 irrelevant, also the result of LT/GT/LTGT. */
4890 if ((! HONOR_NANS (GET_MODE (trueop0))
4891 || code == UNEQ || code == UNLE || code == UNGE
4892 || ((code == LT || code == GT || code == LTGT)
4893 && ! HONOR_SNANS (GET_MODE (trueop0))))
4894 && rtx_equal_p (trueop0, trueop1)
4895 && ! side_effects_p (trueop0))
4896 return comparison_result (code, CMP_EQ);
4898 /* If the operands are floating-point constants, see if we can fold the result. */
4900 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4901 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4902 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4904 REAL_VALUE_TYPE d0, d1;
4906 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4907 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4909 /* Comparisons are unordered iff at least one of the values is NaN. */
4910 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4920 return const_true_rtx;
4933 return comparison_result (code,
4934 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4935 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4938 /* Otherwise, see if the operands are both integers. */
4939 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4940 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4941 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4943 int width = GET_MODE_PRECISION (mode);
4944 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4945 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4947 /* Get the two words comprising each integer constant. */
4948 if (CONST_DOUBLE_AS_INT_P (trueop0))
4950 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4951 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4955 l0u = l0s = INTVAL (trueop0);
4956 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4959 if (CONST_DOUBLE_AS_INT_P (trueop1))
4961 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4962 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4966 l1u = l1s = INTVAL (trueop1);
4967 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4970 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4971 we have to sign or zero-extend the values. */
4972 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4974 l0u &= GET_MODE_MASK (mode);
4975 l1u &= GET_MODE_MASK (mode);
4977 if (val_signbit_known_set_p (mode, l0s))
4978 l0s |= ~GET_MODE_MASK (mode);
4980 if (val_signbit_known_set_p (mode, l1s))
4981 l1s |= ~GET_MODE_MASK (mode);
4983 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4984 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4986 if (h0u == h1u && l0u == l1u)
4987 return comparison_result (code, CMP_EQ);
4991 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4992 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4993 return comparison_result (code, cr);
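/* Illustrative sketch, not part of the original file: the two-word
   comparison above reduces to a *signed* compare of the high words,
   falling back to an *unsigned* compare of the low words on a tie.
   A plain-C stand-in (name hypothetical), assuming two-word values: */

static int ATTRIBUTE_UNUSED
doubleword_signed_less_p (HOST_WIDE_INT h0, unsigned HOST_WIDE_INT l0,
                          HOST_WIDE_INT h1, unsigned HOST_WIDE_INT l1)
{
  /* The sign lives entirely in the high word; the low word carries
     magnitude only and therefore compares unsigned.  */
  return h0 < h1 || (h0 == h1 && l0 < l1);
}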
4997 /* Optimize comparisons with upper and lower bounds. */
4998 if (HWI_COMPUTABLE_MODE_P (mode)
4999 && CONST_INT_P (trueop1))
5002 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5003 HOST_WIDE_INT val = INTVAL (trueop1);
5004 HOST_WIDE_INT mmin, mmax;
5014 /* Get a reduced range if the sign bit is zero. */
5015 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5022 rtx mmin_rtx, mmax_rtx;
5023 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5025 mmin = INTVAL (mmin_rtx);
5026 mmax = INTVAL (mmax_rtx);
5029 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5031 mmin >>= (sign_copies - 1);
5032 mmax >>= (sign_copies - 1);
5038 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5040 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5041 return const_true_rtx;
5042 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5047 return const_true_rtx;
5052 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5054 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5055 return const_true_rtx;
5056 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5061 return const_true_rtx;
5067 /* x == y is always false for y out of range. */
5068 if (val < mmin || val > mmax)
5072 /* x > y is always false for y >= mmax, always true for y < mmin. */
5074 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5076 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5077 return const_true_rtx;
5083 return const_true_rtx;
5086 /* x < y is always false for y <= mmin, always true for y > mmax. */
5088 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5090 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5091 return const_true_rtx;
5097 return const_true_rtx;
5101 /* x != y is always true for y out of range. */
5102 if (val < mmin || val > mmax)
5103 return const_true_rtx;
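/* Worked example (illustrative): for a QImode value whose nonzero_bits
   allow all eight bits, the unsigned bounds are mmin = 0 and mmax = 255,
   so (gtu x (const_int 255)) folds to const0_rtx and
   (leu x (const_int 255)) to const_true_rtx by the checks above. */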
5111 /* Optimize integer comparisons with zero. */
5112 if (trueop1 == const0_rtx)
5114 /* Some addresses are known to be nonzero. We don't know
5115 their sign, but equality comparisons are known. */
5116 if (nonzero_address_p (trueop0))
5118 if (code == EQ || code == LEU)
5120 if (code == NE || code == GTU)
5121 return const_true_rtx;
5124 /* See if the first operand is an IOR with a constant. If so, we
5125 may be able to determine the result of this comparison. */
5126 if (GET_CODE (op0) == IOR)
5128 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5129 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5131 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5132 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5133 && (UINTVAL (inner_const)
5134 & ((unsigned HOST_WIDE_INT) 1
5144 return const_true_rtx;
5148 return const_true_rtx;
5162 /* Optimize comparison of ABS with zero. */
5163 if (trueop1 == CONST0_RTX (mode)
5164 && (GET_CODE (trueop0) == ABS
5165 || (GET_CODE (trueop0) == FLOAT_EXTEND
5166 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5171 /* Optimize abs(x) < 0.0. */
5172 if (!HONOR_SNANS (mode)
5173 && (!INTEGRAL_MODE_P (mode)
5174 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5176 if (INTEGRAL_MODE_P (mode)
5177 && (issue_strict_overflow_warning
5178 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5179 warning (OPT_Wstrict_overflow,
5180 ("assuming signed overflow does not occur when "
5181 "assuming abs (x) < 0 is false"));
5187 /* Optimize abs(x) >= 0.0. */
5188 if (!HONOR_NANS (mode)
5189 && (!INTEGRAL_MODE_P (mode)
5190 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5192 if (INTEGRAL_MODE_P (mode)
5193 && (issue_strict_overflow_warning
5194 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5195 warning (OPT_Wstrict_overflow,
5196 ("assuming signed overflow does not occur when "
5197 "assuming abs (x) >= 0 is true"));
5198 return const_true_rtx;
5203 /* Optimize ! (abs(x) < 0.0). */
5204 return const_true_rtx;
5214 /* Simplify CODE, an operation with result mode MODE and three operands,
5215 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5216 a constant. Return 0 if no simplification is possible. */
5219 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5220 enum machine_mode op0_mode, rtx op0, rtx op1,
5223 unsigned int width = GET_MODE_PRECISION (mode);
5224 bool any_change = false;
5227 /* VOIDmode means "infinite" precision. */
5229 width = HOST_BITS_PER_WIDE_INT;
5234 /* Simplify negations around the multiplication. */
5235 /* -a * -b + c => a * b + c. */
5236 if (GET_CODE (op0) == NEG)
5238 tem = simplify_unary_operation (NEG, mode, op1, mode);
5240 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5242 else if (GET_CODE (op1) == NEG)
5244 tem = simplify_unary_operation (NEG, mode, op0, mode);
5246 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5249 /* Canonicalize the two multiplication operands. */
5250 /* a * -b + c => -b * a + c. */
5251 if (swap_commutative_operands_p (op0, op1))
5252 tem = op0, op0 = op1, op1 = tem, any_change = true;
5255 return gen_rtx_FMA (mode, op0, op1, op2);
5260 if (CONST_INT_P (op0)
5261 && CONST_INT_P (op1)
5262 && CONST_INT_P (op2)
5263 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5264 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5266 /* Extracting a bit-field from a constant */
5267 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5268 HOST_WIDE_INT op1val = INTVAL (op1);
5269 HOST_WIDE_INT op2val = INTVAL (op2);
5270 if (BITS_BIG_ENDIAN)
5271 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5275 if (HOST_BITS_PER_WIDE_INT != op1val)
5277 /* First zero-extend. */
5278 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5279 /* If desired, propagate sign bit. */
5280 if (code == SIGN_EXTRACT
5281 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5283 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5286 return gen_int_mode (val, mode);
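/* Illustrative sketch, not part of the original file: the constant
   *_EXTRACT folding above as a stand-alone helper (name hypothetical).
   POS counts from the least significant bit, as in the !BITS_BIG_ENDIAN
   case, and LEN is assumed smaller than HOST_BITS_PER_WIDE_INT: */

static unsigned HOST_WIDE_INT ATTRIBUTE_UNUSED
extract_constant_bitfield (unsigned HOST_WIDE_INT val, int len, int pos,
                           int sign_extract_p)
{
  /* Drop bits below the field, then zero-extend to LEN bits.  */
  val >>= pos;
  val &= ((unsigned HOST_WIDE_INT) 1 << len) - 1;
  /* For SIGN_EXTRACT, propagate the field's top bit.  */
  if (sign_extract_p
      && (val & ((unsigned HOST_WIDE_INT) 1 << (len - 1))) != 0)
    val |= ~(((unsigned HOST_WIDE_INT) 1 << len) - 1);
  return val;
}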
5291 if (CONST_INT_P (op0))
5292 return op0 != const0_rtx ? op1 : op2;
5294 /* Convert c ? a : a into "a". */
5295 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5298 /* Convert a != b ? a : b into "a". */
5299 if (GET_CODE (op0) == NE
5300 && ! side_effects_p (op0)
5301 && ! HONOR_NANS (mode)
5302 && ! HONOR_SIGNED_ZEROS (mode)
5303 && ((rtx_equal_p (XEXP (op0, 0), op1)
5304 && rtx_equal_p (XEXP (op0, 1), op2))
5305 || (rtx_equal_p (XEXP (op0, 0), op2)
5306 && rtx_equal_p (XEXP (op0, 1), op1))))
5309 /* Convert a == b ? a : b into "b". */
5310 if (GET_CODE (op0) == EQ
5311 && ! side_effects_p (op0)
5312 && ! HONOR_NANS (mode)
5313 && ! HONOR_SIGNED_ZEROS (mode)
5314 && ((rtx_equal_p (XEXP (op0, 0), op1)
5315 && rtx_equal_p (XEXP (op0, 1), op2))
5316 || (rtx_equal_p (XEXP (op0, 0), op2)
5317 && rtx_equal_p (XEXP (op0, 1), op1))))
5320 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5322 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5323 ? GET_MODE (XEXP (op0, 1))
5324 : GET_MODE (XEXP (op0, 0)));
5327 /* Look for happy constants in op1 and op2. */
5328 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5330 HOST_WIDE_INT t = INTVAL (op1);
5331 HOST_WIDE_INT f = INTVAL (op2);
5333 if (t == STORE_FLAG_VALUE && f == 0)
5334 code = GET_CODE (op0);
5335 else if (t == 0 && f == STORE_FLAG_VALUE)
5338 tmp = reversed_comparison_code (op0, NULL_RTX);
5346 return simplify_gen_relational (code, mode, cmp_mode,
5347 XEXP (op0, 0), XEXP (op0, 1));
5350 if (cmp_mode == VOIDmode)
5351 cmp_mode = op0_mode;
5352 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5353 cmp_mode, XEXP (op0, 0),
5356 /* See if any simplifications were possible. */
5359 if (CONST_INT_P (temp))
5360 return temp == const0_rtx ? op2 : op1;
5362 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5368 gcc_assert (GET_MODE (op0) == mode);
5369 gcc_assert (GET_MODE (op1) == mode);
5370 gcc_assert (VECTOR_MODE_P (mode));
5371 op2 = avoid_constant_pool_reference (op2);
5372 if (CONST_INT_P (op2))
5374 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5375 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5376 int mask = (1 << n_elts) - 1;
5378 if (!(INTVAL (op2) & mask))
5380 if ((INTVAL (op2) & mask) == mask)
5383 op0 = avoid_constant_pool_reference (op0);
5384 op1 = avoid_constant_pool_reference (op1);
5385 if (GET_CODE (op0) == CONST_VECTOR
5386 && GET_CODE (op1) == CONST_VECTOR)
5388 rtvec v = rtvec_alloc (n_elts);
5391 for (i = 0; i < n_elts; i++)
5392 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5393 ? CONST_VECTOR_ELT (op0, i)
5394 : CONST_VECTOR_ELT (op1, i));
5395 return gen_rtx_CONST_VECTOR (mode, v);
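/* Worked example (illustrative): with op2 = (const_int 5), binary 0101,
   merging constant vectors [a b c d] and [e f g h] takes elements 0
   and 2 from the first operand and the rest from the second, giving
   [a f c h]. */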
5407 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR,
5409 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5411 Works by unpacking OP into a collection of 8-bit values
5412 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5413 and then repacking them again for OUTERMODE. */
5416 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5417 enum machine_mode innermode, unsigned int byte)
5419 /* We support up to 512-bit values (for V8DFmode). */
5423 value_mask = (1 << value_bit) - 1
5425 unsigned char value[max_bitsize / value_bit];
5434 rtvec result_v = NULL;
5435 enum mode_class outer_class;
5436 enum machine_mode outer_submode;
5438 /* Some ports misuse CCmode. */
5439 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5442 /* We have no way to represent a complex constant at the rtl level. */
5443 if (COMPLEX_MODE_P (outermode))
5446 /* Unpack the value. */
5448 if (GET_CODE (op) == CONST_VECTOR)
5450 num_elem = CONST_VECTOR_NUNITS (op);
5451 elems = &CONST_VECTOR_ELT (op, 0);
5452 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5458 elem_bitsize = max_bitsize;
5460 /* If this asserts, it is too complicated; reducing value_bit may help. */
5461 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5462 /* I don't know how to handle endianness of sub-units. */
5463 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5465 for (elem = 0; elem < num_elem; elem++)
5468 rtx el = elems[elem];
5470 /* Vectors are kept in target memory order. (This is probably a mistake.) */
5473 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5474 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5476 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5477 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5478 unsigned bytele = (subword_byte % UNITS_PER_WORD
5479 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5480 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5483 switch (GET_CODE (el))
5487 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5489 *vp++ = INTVAL (el) >> i;
5490 /* CONST_INTs are always logically sign-extended. */
5491 for (; i < elem_bitsize; i += value_bit)
5492 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5496 if (GET_MODE (el) == VOIDmode)
5498 unsigned char extend = 0;
5499 /* If this triggers, someone should have generated a
5500 CONST_INT instead. */
5501 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5503 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5504 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5505 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5508 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5512 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5514 for (; i < elem_bitsize; i += value_bit)
5519 long tmp[max_bitsize / 32];
5520 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5522 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5523 gcc_assert (bitsize <= elem_bitsize);
5524 gcc_assert (bitsize % value_bit == 0);
5526 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5529 /* real_to_target produces its result in words affected by
5530 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5531 and use WORDS_BIG_ENDIAN instead; see the documentation
5532 of SUBREG in rtl.texi. */
5533 for (i = 0; i < bitsize; i += value_bit)
5536 if (WORDS_BIG_ENDIAN)
5537 ibase = bitsize - 1 - i;
5540 *vp++ = tmp[ibase / 32] >> i % 32;
5543 /* It shouldn't matter what's done here, so fill it with zero. */
5545 for (; i < elem_bitsize; i += value_bit)
5551 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5553 for (i = 0; i < elem_bitsize; i += value_bit)
5554 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5558 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5559 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5560 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5562 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5563 >> (i - HOST_BITS_PER_WIDE_INT);
5564 for (; i < elem_bitsize; i += value_bit)
5574 /* Now, pick the right byte to start with. */
5575 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5576 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5577 will already have offset 0. */
5578 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5580 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5582 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5583 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5584 byte = (subword_byte % UNITS_PER_WORD
5585 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5588 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5589 so if it's become negative it will instead be very large.) */
5590 gcc_assert (byte < GET_MODE_SIZE (innermode));
5592 /* Convert from bytes to chunks of size value_bit. */
5593 value_start = byte * (BITS_PER_UNIT / value_bit);
5595 /* Re-pack the value. */
5597 if (VECTOR_MODE_P (outermode))
5599 num_elem = GET_MODE_NUNITS (outermode);
5600 result_v = rtvec_alloc (num_elem);
5601 elems = &RTVEC_ELT (result_v, 0);
5602 outer_submode = GET_MODE_INNER (outermode);
5608 outer_submode = outermode;
5611 outer_class = GET_MODE_CLASS (outer_submode);
5612 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5614 gcc_assert (elem_bitsize % value_bit == 0);
5615 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5617 for (elem = 0; elem < num_elem; elem++)
5621 /* Vectors are stored in target memory order. (This is probably a mistake.) */
5624 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5625 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5627 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5628 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5629 unsigned bytele = (subword_byte % UNITS_PER_WORD
5630 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5631 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5634 switch (outer_class)
5637 case MODE_PARTIAL_INT:
5639 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5642 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5644 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5645 for (; i < elem_bitsize; i += value_bit)
5646 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5647 << (i - HOST_BITS_PER_WIDE_INT);
5649 /* immed_double_const doesn't call trunc_int_for_mode. I don't know why. */
5651 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5652 elems[elem] = gen_int_mode (lo, outer_submode);
5653 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5654 elems[elem] = immed_double_const (lo, hi, outer_submode);
5661 case MODE_DECIMAL_FLOAT:
5664 long tmp[max_bitsize / 32];
5666 /* real_from_target wants its input in words affected by
5667 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5668 and use WORDS_BIG_ENDIAN instead; see the documentation
5669 of SUBREG in rtl.texi. */
5670 for (i = 0; i < max_bitsize / 32; i++)
5672 for (i = 0; i < elem_bitsize; i += value_bit)
5675 if (WORDS_BIG_ENDIAN)
5676 ibase = elem_bitsize - 1 - i;
5679 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5682 real_from_target (&r, tmp, outer_submode);
5683 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5695 f.mode = outer_submode;
5698 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5700 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5701 for (; i < elem_bitsize; i += value_bit)
5702 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5703 << (i - HOST_BITS_PER_WIDE_INT));
5705 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5713 if (VECTOR_MODE_P (outermode))
5714 return gen_rtx_CONST_VECTOR (outermode, result_v);
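/* Illustrative sketch, not part of the original file: the essence of
   simplify_immed_subreg for the scalar-integer case -- serialize the
   inner constant into little-endian bytes, then read the OUTERMODE-sized
   window starting at BYTE back out. A plain-C stand-in (name
   hypothetical), assuming a 64-bit HOST_WIDE_INT, a 4-byte outer mode
   and BYTE <= 4: */

static unsigned int ATTRIBUTE_UNUSED
immed_subreg_sketch (unsigned HOST_WIDE_INT inner, unsigned int byte)
{
  unsigned char buf[8];
  unsigned int i, out = 0;

  /* Unpack, least significant byte first.  */
  for (i = 0; i < 8; i++)
    buf[i] = (inner >> (i * 8)) & 0xff;
  /* Repack the selected 4-byte window.  */
  for (i = 0; i < 4; i++)
    out |= (unsigned int) buf[byte + i] << (i * 8);
  return out;
}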
5719 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5720 Return 0 if no simplifications are possible. */
5722 simplify_subreg (enum machine_mode outermode, rtx op,
5723 enum machine_mode innermode, unsigned int byte)
5725 /* Little bit of sanity checking. */
5726 gcc_assert (innermode != VOIDmode);
5727 gcc_assert (outermode != VOIDmode);
5728 gcc_assert (innermode != BLKmode);
5729 gcc_assert (outermode != BLKmode);
5731 gcc_assert (GET_MODE (op) == innermode
5732 || GET_MODE (op) == VOIDmode);
5734 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5737 if (byte >= GET_MODE_SIZE (innermode))
5740 if (outermode == innermode && !byte)
5743 if (CONST_SCALAR_INT_P (op)
5744 || CONST_DOUBLE_AS_FLOAT_P (op)
5745 || GET_CODE (op) == CONST_FIXED
5746 || GET_CODE (op) == CONST_VECTOR)
5747 return simplify_immed_subreg (outermode, op, innermode, byte);
5749 /* Changing mode twice with SUBREG => just change it once,
5750 or not at all if changing back to the starting mode of OP. */
5751 if (GET_CODE (op) == SUBREG)
5753 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5754 int final_offset = byte + SUBREG_BYTE (op);
5757 if (outermode == innermostmode
5758 && byte == 0 && SUBREG_BYTE (op) == 0)
5759 return SUBREG_REG (op);
5761 /* The SUBREG_BYTE represents an offset, as if the value were stored
5762 in memory. An irritating exception is the paradoxical subreg, where
5763 we define SUBREG_BYTE to be 0; on big endian machines, this
5764 value should be negative. For a moment, undo this exception. */
5765 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5767 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5768 if (WORDS_BIG_ENDIAN)
5769 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5770 if (BYTES_BIG_ENDIAN)
5771 final_offset += difference % UNITS_PER_WORD;
5773 if (SUBREG_BYTE (op) == 0
5774 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5776 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5777 if (WORDS_BIG_ENDIAN)
5778 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5779 if (BYTES_BIG_ENDIAN)
5780 final_offset += difference % UNITS_PER_WORD;
5783 /* See whether resulting subreg will be paradoxical. */
5784 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5786 /* In nonparadoxical subregs we can't handle negative offsets. */
5787 if (final_offset < 0)
5789 /* Bail out in case resulting subreg would be incorrect. */
5790 if (final_offset % GET_MODE_SIZE (outermode)
5791 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5797 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5799 /* In a paradoxical subreg, see if we are still looking at the lower
5800 part. If so, our SUBREG_BYTE will be 0. */
5801 if (WORDS_BIG_ENDIAN)
5802 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5803 if (BYTES_BIG_ENDIAN)
5804 offset += difference % UNITS_PER_WORD;
5805 if (offset == final_offset)
5811 /* Recurse for further possible simplifications. */
5812 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5816 if (validate_subreg (outermode, innermostmode,
5817 SUBREG_REG (op), final_offset))
5819 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5820 if (SUBREG_PROMOTED_VAR_P (op)
5821 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5822 && GET_MODE_CLASS (outermode) == MODE_INT
5823 && IN_RANGE (GET_MODE_SIZE (outermode),
5824 GET_MODE_SIZE (innermode),
5825 GET_MODE_SIZE (innermostmode))
5826 && subreg_lowpart_p (newx))
5828 SUBREG_PROMOTED_VAR_P (newx) = 1;
5829 SUBREG_PROMOTED_UNSIGNED_SET
5830 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5837 /* SUBREG of a hard register => just change the register number
5838 and/or mode. If the hard register is not valid in that mode,
5839 suppress this simplification. If the hard register is the stack,
5840 frame, or argument pointer, leave this as a SUBREG. */
5842 if (REG_P (op) && HARD_REGISTER_P (op))
5844 unsigned int regno, final_regno;
5847 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5848 if (HARD_REGISTER_NUM_P (final_regno))
5851 int final_offset = byte;
5853 /* Adjust offset for paradoxical subregs. */
5855 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5857 int difference = (GET_MODE_SIZE (innermode)
5858 - GET_MODE_SIZE (outermode));
5859 if (WORDS_BIG_ENDIAN)
5860 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5861 if (BYTES_BIG_ENDIAN)
5862 final_offset += difference % UNITS_PER_WORD;
5865 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5867 /* Propagate the original regno. We don't have any way to specify
5868 the offset inside the original regno, so do so only for the lowpart.
5869 The information is used only by alias analysis, which cannot
5870 grok partial registers anyway. */
5872 if (subreg_lowpart_offset (outermode, innermode) == byte)
5873 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5878 /* If we have a SUBREG of a register that we are replacing and we are
5879 replacing it with a MEM, make a new MEM and try replacing the
5880 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5881 or if we would be widening it. */
5884 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5885 /* Allow splitting of volatile memory references in case we don't
5886 have an instruction to move the whole thing. */
5887 && (! MEM_VOLATILE_P (op)
5888 || ! have_insn_for (SET, innermode))
5889 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5890 return adjust_address_nv (op, outermode, byte);
5892 /* Handle complex values represented as CONCAT
5893 of real and imaginary part. */
5894 if (GET_CODE (op) == CONCAT)
5896 unsigned int part_size, final_offset;
5899 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5900 if (byte < part_size)
5902 part = XEXP (op, 0);
5903 final_offset = byte;
5907 part = XEXP (op, 1);
5908 final_offset = byte - part_size;
5911 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5914 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5917 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5918 return gen_rtx_SUBREG (outermode, part, final_offset);
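/* Worked example (illustrative): with 4-byte SFmode parts,
   (subreg:SF (concat:SC re im) 0) selects the real part RE and
   (subreg:SF (concat:SC re im) 4) the imaginary part IM; offsets
   straddling the two parts are rejected above. */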
5922 /* A SUBREG resulting from a zero extension may fold to zero if
5923 it extracts higher bits than the ZERO_EXTEND's source bits. */
5924 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5926 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5927 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5928 return CONST0_RTX (outermode);
5931 if (SCALAR_INT_MODE_P (outermode)
5932 && SCALAR_INT_MODE_P (innermode)
5933 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5934 && byte == subreg_lowpart_offset (outermode, innermode))
5936 rtx tem = simplify_truncation (outermode, op, innermode);
5944 /* Make a SUBREG operation or equivalent if it folds. */
5947 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5948 enum machine_mode innermode, unsigned int byte)
5952 newx = simplify_subreg (outermode, op, innermode, byte);
5956 if (GET_CODE (op) == SUBREG
5957 || GET_CODE (op) == CONCAT
5958 || GET_MODE (op) == VOIDmode)
5961 if (validate_subreg (outermode, innermode, op, byte))
5962 return gen_rtx_SUBREG (outermode, op, byte);
5967 /* Simplify X, an rtx expression.
5969 Return the simplified expression or NULL if no simplifications were performed.
5972 This is the preferred entry point into the simplification routines;
5973 however, we still allow passes to call the more specific routines.
5975 Right now GCC has three (yes, three) major bodies of RTL simplification
5976 code that need to be unified.
5978 1. fold_rtx in cse.c. This code uses various CSE specific
5979 information to aid in RTL simplification.
5981 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5982 it uses combine specific information to aid in RTL simplification.
5985 3. The routines in this file.
5988 Long term we want to only have one body of simplification code; to
5989 get to that state I recommend the following steps:
5991 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5992 which are not pass dependent state into these routines.
5994 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5995 use this routine whenever possible.
5997 3. Allow for pass dependent state to be provided to these
5998 routines and add simplifications based on the pass dependent
5999 state. Remove code from cse.c & combine.c that becomes
6002 It will take time, but ultimately the compiler will be easier to
6003 maintain and improve. It's totally silly that when we add a
6004 simplification it needs to be added to 4 places (3 for RTL
6005 simplification and 1 for tree simplification). */
6008 simplify_rtx (const_rtx x)
6010 const enum rtx_code code = GET_CODE (x);
6011 const enum machine_mode mode = GET_MODE (x);
6013 switch (GET_RTX_CLASS (code))
6016 return simplify_unary_operation (code, mode,
6017 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6018 case RTX_COMM_ARITH:
6019 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6020 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6022 /* Fall through.... */
6025 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6028 case RTX_BITFIELD_OPS:
6029 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6030 XEXP (x, 0), XEXP (x, 1),
6034 case RTX_COMM_COMPARE:
6035 return simplify_relational_operation (code, mode,
6036 ((GET_MODE (XEXP (x, 0))
6038 ? GET_MODE (XEXP (x, 0))
6039 : GET_MODE (XEXP (x, 1))),
6045 return simplify_subreg (mode, SUBREG_REG (x),
6046 GET_MODE (SUBREG_REG (x)),
6053 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6054 if (GET_CODE (XEXP (x, 0)) == HIGH
6055 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))