/* RTL simplification functions for GNU compiler.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
- Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011, 2012 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
-#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
-#include "toplev.h"
-#include "output.h"
+#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
#define HWI_SIGN_EXTEND(low) \
((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
-static rtx neg_const_int (enum machine_mode, rtx);
-static bool plus_minus_operand_p (rtx);
-static int simplify_plus_minus_op_data_cmp (const void *, const void *);
+static rtx neg_const_int (enum machine_mode, const_rtx);
+static bool plus_minus_operand_p (const_rtx);
+static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
unsigned int);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
maximally negative number can overflow). */
static rtx
-neg_const_int (enum machine_mode mode, rtx i)
+neg_const_int (enum machine_mode mode, const_rtx i)
{
return gen_int_mode (- INTVAL (i), mode);
}
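/* Illustrative sketch (standalone C, not part of the patch): why the
   negation above must truncate.  In a hypothetical 8-bit mode the
   maximally negative value -128 has no positive counterpart, so its
   negation wraps back to -128 once masked to the mode's width, which
   is what gen_int_mode arranges for the CONST_INT result.  */
static signed char
neg_qimode_demo (signed char x)
{
  return (signed char) (- (int) x);  /* neg_qimode_demo (-128) == -128 */
}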
/* Test whether expression, X, is an immediate constant that represents
the most significant bit of machine mode MODE. */
bool
-mode_signbit_p (enum machine_mode mode, rtx x)
+mode_signbit_p (enum machine_mode mode, const_rtx x)
{
unsigned HOST_WIDE_INT val;
unsigned int width;
if (GET_MODE_CLASS (mode) != MODE_INT)
return false;
- width = GET_MODE_BITSIZE (mode);
+ width = GET_MODE_PRECISION (mode);
if (width == 0)
return false;
-
+
if (width <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (x) == CONST_INT)
+ && CONST_INT_P (x))
val = INTVAL (x);
- else if (width <= 2 * HOST_BITS_PER_WIDE_INT
+ else if (width <= HOST_BITS_PER_DOUBLE_INT
&& GET_CODE (x) == CONST_DOUBLE
&& CONST_DOUBLE_LOW (x) == 0)
{
width -= HOST_BITS_PER_WIDE_INT;
}
else
+ /* FIXME: We don't yet have a representation for wider modes. */
return false;
if (width < HOST_BITS_PER_WIDE_INT)
val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
+
+/* Test whether VAL is equal to the most significant bit of mode MODE
+ (after masking with the mode mask of MODE). Returns false if the
+ precision of MODE is too large to handle. */
+
+bool
+val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
+{
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_PRECISION (mode);
+ if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ val &= GET_MODE_MASK (mode);
+ return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
+}
+
+/* Test whether the most significant bit of mode MODE is set in VAL.
+ Returns false if the precision of MODE is too large to handle. */
+bool
+val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
+{
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_PRECISION (mode);
+ if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
+ return val != 0;
+}
+
+/* Test whether the most significant bit of mode MODE is clear in VAL.
+ Returns false if the precision of MODE is too large to handle. */
+bool
+val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
+{
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_PRECISION (mode);
+ if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
+ return val == 0;
+}
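/* Worked example (standalone C, assuming an 8-bit integer mode whose
   sign bit is 0x80): these checks mirror the three predicates above.  */
static void
signbit_demo (void)
{
  int p = ((0x180 & 0xff) == 0x80);  /* val_signbit_p: true */
  int s = ((0xff & 0x80) != 0);      /* val_signbit_known_set_p: true */
  int c = ((0x7f & 0x80) == 0);      /* val_signbit_known_clear_p: true */
  (void) p; (void) s; (void) c;
}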
\f
/* Make a binary operation by properly ordering the operands and
seeing if the expression folds. */
/* Split the address into a base and integer offset. */
if (GET_CODE (addr) == CONST
&& GET_CODE (XEXP (addr, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
{
offset = INTVAL (XEXP (XEXP (addr, 0), 1));
addr = XEXP (XEXP (addr, 0), 0);
return x;
}
\f
+/* Simplify a MEM based on its attributes. This is the default
+ delegitimize_address target hook, and it's recommended that every
+ overrider call it. */
+
+rtx
+delegitimize_mem_from_attrs (rtx x)
+{
+ /* MEMs without MEM_OFFSETs may have been offset, so we can't just
+ use their base addresses as equivalent. */
+ if (MEM_P (x)
+ && MEM_EXPR (x)
+ && MEM_OFFSET_KNOWN_P (x))
+ {
+ tree decl = MEM_EXPR (x);
+ enum machine_mode mode = GET_MODE (x);
+ HOST_WIDE_INT offset = 0;
+
+ switch (TREE_CODE (decl))
+ {
+ default:
+ decl = NULL;
+ break;
+
+ case VAR_DECL:
+ break;
+
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ case VIEW_CONVERT_EXPR:
+ {
+ HOST_WIDE_INT bitsize, bitpos;
+ tree toffset;
+ int unsignedp, volatilep = 0;
+
+ decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
+ &mode, &unsignedp, &volatilep, false);
+ if (bitsize != GET_MODE_BITSIZE (mode)
+ || (bitpos % BITS_PER_UNIT)
+ || (toffset && !host_integerp (toffset, 0)))
+ decl = NULL;
+ else
+ {
+ offset += bitpos / BITS_PER_UNIT;
+ if (toffset)
+ offset += TREE_INT_CST_LOW (toffset);
+ }
+ break;
+ }
+ }
+
+ if (decl
+ && mode == GET_MODE (x)
+ && TREE_CODE (decl) == VAR_DECL
+ && (TREE_STATIC (decl)
+ || DECL_THREAD_LOCAL_P (decl))
+ && DECL_RTL_SET_P (decl)
+ && MEM_P (DECL_RTL (decl)))
+ {
+ rtx newx;
+
+ offset += MEM_OFFSET (x);
+
+ newx = DECL_RTL (decl);
+
+ if (MEM_P (newx))
+ {
+ rtx n = XEXP (newx, 0), o = XEXP (x, 0);
+
+ /* Avoid creating a new MEM needlessly if we already had
+ the same address. We do if there's no OFFSET and the
+ old address X is identical to NEWX, or if X is of the
+ form (plus NEWX OFFSET), or the NEWX is of the form
+ (plus Y (const_int Z)) and X is that with the offset
+ added: (plus Y (const_int Z+OFFSET)). */
+ if (!((offset == 0
+ || (GET_CODE (o) == PLUS
+ && GET_CODE (XEXP (o, 1)) == CONST_INT
+ && (offset == INTVAL (XEXP (o, 1))
+ || (GET_CODE (n) == PLUS
+ && GET_CODE (XEXP (n, 1)) == CONST_INT
+ && (INTVAL (XEXP (n, 1)) + offset
+ == INTVAL (XEXP (o, 1)))
+ && (n = XEXP (n, 0))))
+ && (o = XEXP (o, 0))))
+ && rtx_equal_p (o, n)))
+ x = adjust_address_nv (newx, mode, offset);
+ }
+ else if (GET_MODE (x) == GET_MODE (newx)
+ && offset == 0)
+ x = newx;
+ }
+ }
+
+ return x;
+}
+\f
/* Make a unary operation by first seeing if it folds and otherwise making
the specified operation. */
return gen_rtx_fmt_ee (code, mode, op0, op1);
}
\f
-/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
- resulting RTX. Return a new RTX which is as simplified as possible. */
+/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
+ and simplify the result. If FN is non-NULL, call this callback on each
+ X, if it returns non-NULL, replace X with its return value and simplify the
+ result. */
rtx
-simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
+simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
+ rtx (*fn) (rtx, const_rtx, void *), void *data)
{
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
enum machine_mode op_mode;
- rtx op0, op1, op2;
-
- /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
- to build a new expression substituting recursively. If we can't do
- anything, return our input. */
+ const char *fmt;
+ rtx op0, op1, op2, newx, op;
+ rtvec vec, newvec;
+ int i, j;
- if (x == old_rtx)
- return new_rtx;
+ if (__builtin_expect (fn != NULL, 0))
+ {
+ newx = fn (x, old_rtx, data);
+ if (newx)
+ return newx;
+ }
+ else if (rtx_equal_p (x, old_rtx))
+ return copy_rtx ((rtx) data);
switch (GET_RTX_CLASS (code))
{
case RTX_UNARY:
op0 = XEXP (x, 0);
op_mode = GET_MODE (op0);
- op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
if (op0 == XEXP (x, 0))
return x;
return simplify_gen_unary (code, mode, op0, op_mode);
case RTX_BIN_ARITH:
case RTX_COMM_ARITH:
- op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
- op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
+ op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return x;
return simplify_gen_binary (code, mode, op0, op1);
op0 = XEXP (x, 0);
op1 = XEXP (x, 1);
op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
- op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
- op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
+ op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return x;
return simplify_gen_relational (code, mode, op_mode, op0, op1);
case RTX_BITFIELD_OPS:
op0 = XEXP (x, 0);
op_mode = GET_MODE (op0);
- op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
- op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
- op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
+ op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
+ op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
return x;
if (op_mode == VOIDmode)
return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
case RTX_EXTRA:
- /* The only case we try to handle is a SUBREG. */
if (code == SUBREG)
{
- op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
if (op0 == SUBREG_REG (x))
return x;
op0 = simplify_gen_subreg (GET_MODE (x), op0,
case RTX_OBJ:
if (code == MEM)
{
- op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
if (op0 == XEXP (x, 0))
return x;
return replace_equiv_address_nv (x, op0);
}
else if (code == LO_SUM)
{
- op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
- op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
+ op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
/* (lo_sum (high x) x) -> x */
if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
return x;
return gen_rtx_LO_SUM (mode, op0, op1);
}
- else if (code == REG)
- {
- if (rtx_equal_p (x, old_rtx))
- return new_rtx;
- }
break;
default:
break;
}
- return x;
+
+ newx = x;
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; fmt[i]; i++)
+ switch (fmt[i])
+ {
+ case 'E':
+ vec = XVEC (x, i);
+ newvec = XVEC (newx, i);
+ for (j = 0; j < GET_NUM_ELEM (vec); j++)
+ {
+ op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
+ old_rtx, fn, data);
+ if (op != RTVEC_ELT (vec, j))
+ {
+ if (newvec == vec)
+ {
+ newvec = shallow_copy_rtvec (vec);
+ if (x == newx)
+ newx = shallow_copy_rtx (x);
+ XVEC (newx, i) = newvec;
+ }
+ RTVEC_ELT (newvec, j) = op;
+ }
+ }
+ break;
+
+ case 'e':
+ if (XEXP (x, i))
+ {
+ op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
+ if (op != XEXP (x, i))
+ {
+ if (x == newx)
+ newx = shallow_copy_rtx (x);
+ XEXP (newx, i) = op;
+ }
+ }
+ break;
+ }
+ return newx;
+}
+
+/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
+ resulting RTX. Return a new RTX which is as simplified as possible. */
+
+rtx
+simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
+{
+ return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
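/* Hedged usage sketch for the callback form above; the callback name is
   hypothetical and this is not part of the patch.  Returning NULL_RTX
   from the callback tells simplify_replace_fn_rtx to keep recursing.  */
static rtx
zap_mem_cb (rtx x, const_rtx old_rtx ATTRIBUTE_UNUSED,
            void *data ATTRIBUTE_UNUSED)
{
  /* Replace every MEM with a (scratch); leave everything else alone.  */
  return MEM_P (x) ? gen_rtx_SCRATCH (GET_MODE (x)) : NULL_RTX;
}
/* Invoked as: simplify_replace_fn_rtx (pat, NULL_RTX, zap_mem_cb, NULL); */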
\f
/* Try to simplify a unary operation CODE whose output mode is to be
{
rtx trueop, tem;
- if (GET_CODE (op) == CONST)
- op = XEXP (op, 0);
-
trueop = avoid_constant_pool_reference (op);
tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
/* Similarly, (not (neg X)) is (plus X -1). */
if (GET_CODE (op) == NEG)
- return plus_constant (XEXP (op, 0), -1);
+ return plus_constant (mode, XEXP (op, 0), -1);
/* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
if (GET_CODE (op) == XOR
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (op, 1))
&& (temp = simplify_unary_operation (NOT, mode,
XEXP (op, 1), mode)) != 0)
return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
/* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
if (GET_CODE (op) == PLUS
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (op, 1))
&& mode_signbit_p (mode, XEXP (op, 1))
&& (temp = simplify_unary_operation (NOT, mode,
XEXP (op, 1), mode)) != 0)
/* (not (ashiftrt foo C)) where C is the number of bits in FOO
minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
so we can perform the above simplification. */
-
+
if (STORE_FLAG_VALUE == -1
&& GET_CODE (op) == ASHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && CONST_INT_P (XEXP (op, 1))
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_gen_relational (GE, mode, VOIDmode,
XEXP (op, 0), const0_rtx);
if (GET_CODE (op) == PLUS
&& XEXP (op, 1) == const1_rtx)
return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
-
+
/* Similarly, (neg (not X)) is (plus X 1). */
if (GET_CODE (op) == NOT)
- return plus_constant (XEXP (op, 0), 1);
-
+ return plus_constant (mode, XEXP (op, 0), 1);
+
/* (neg (minus X Y)) can become (minus Y X). This transformation
isn't safe for modes with signed zeros, since if X and Y are
both +0, (minus Y X) is the same as (minus X Y). If the
&& !HONOR_SIGNED_ZEROS (mode)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
-
+
if (GET_CODE (op) == PLUS
&& !HONOR_SIGNED_ZEROS (mode)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
{
/* (neg (plus A C)) is simplified to (minus -C A). */
- if (GET_CODE (XEXP (op, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (op, 1))
|| GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
{
temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
}
- /* (neg (mult A B)) becomes (mult (neg A) B).
+ /* (neg (mult A B)) becomes (mult A (neg B)).
This works even for floating-point values. */
if (GET_CODE (op) == MULT
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
{
- temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
- return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
+ temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
+ return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
}
/* NEG commutes with ASHIFT since it is multiplication. Only do
/* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == ASHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && CONST_INT_P (XEXP (op, 1))
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_gen_binary (LSHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
/* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == LSHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && CONST_INT_P (XEXP (op, 1))
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_gen_binary (ASHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
-
+
/* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
if (GET_CODE (op) == XOR
&& XEXP (op, 1) == const1_rtx
&& nonzero_bits (XEXP (op, 0), mode) == 1)
- return plus_constant (XEXP (op, 0), -1);
+ return plus_constant (mode, XEXP (op, 0), -1);
/* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
/* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
if (GET_CODE (op) == LT
- && XEXP (op, 1) == const0_rtx)
+ && XEXP (op, 1) == const0_rtx
+ && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
{
enum machine_mode inner = GET_MODE (XEXP (op, 0));
- int isize = GET_MODE_BITSIZE (inner);
+ int isize = GET_MODE_PRECISION (inner);
if (STORE_FLAG_VALUE == 1)
{
temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
GEN_INT (isize - 1));
if (mode == inner)
return temp;
- if (GET_MODE_BITSIZE (mode) > isize)
+ if (GET_MODE_PRECISION (mode) > isize)
return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
return simplify_gen_unary (TRUNCATE, mode, temp, inner);
}
GEN_INT (isize - 1));
if (mode == inner)
return temp;
- if (GET_MODE_BITSIZE (mode) > isize)
+ if (GET_MODE_PRECISION (mode) > isize)
return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
return simplify_gen_unary (TRUNCATE, mode, temp, inner);
}
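/* Worked example (standalone C, assuming a 32-bit int, arithmetic right
   shift for signed types, and STORE_FLAG_VALUE == 1): (neg (lt x 0)) is
   0 or -1 depending on the sign of x, exactly (ashiftrt x 31).  */
static int
neg_lt0_demo (int x)
{
  return (-(x < 0)) == (x >> 31);  /* holds for every x */
}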
replace the TRUNCATE with a SUBREG. Note that this is also
valid if TRULY_NOOP_TRUNCATION is false for the corresponding
modes we just have to apply a different definition for
- truncation. But don't do this for an (LSHIFTRT (MULT ...))
+ truncation. But don't do this for an (LSHIFTRT (MULT ...))
since this will cause problems with the umulXi3_highpart
patterns. */
- if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
- GET_MODE_BITSIZE (GET_MODE (op)))
+ if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
? (num_sign_bit_copies (op, GET_MODE (op))
- > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
- - GET_MODE_BITSIZE (mode)))
+ > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
+ - GET_MODE_PRECISION (mode)))
: truncated_to_mode (mode, op))
&& ! (GET_CODE (op) == LSHIFTRT
&& GET_CODE (XEXP (op, 0)) == MULT))
STORE_FLAG_VALUE permits. This is like the previous test,
but it works even if the comparison is done in a mode larger
than HOST_BITS_PER_WIDE_INT. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ if (HWI_COMPUTABLE_MODE_P (mode)
&& COMPARISON_P (op)
- && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
+ && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
return rtl_hooks.gen_lowpart_no_emit (mode, op);
break;
/* (float_truncate (float x)) is (float x) */
if (GET_CODE (op) == FLOAT
&& (flag_unsafe_math_optimizations
- || ((unsigned)significand_size (GET_MODE (op))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
- - num_sign_bit_copies (XEXP (op, 0),
- GET_MODE (XEXP (op, 0)))))))
+ || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
+ && ((unsigned)significand_size (GET_MODE (op))
+ >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
+ - num_sign_bit_copies (XEXP (op, 0),
+ GET_MODE (XEXP (op, 0))))))))
return simplify_gen_unary (FLOAT, mode,
XEXP (op, 0),
GET_MODE (XEXP (op, 0)));
*/
if (GET_CODE (op) == FLOAT_EXTEND
|| (GET_CODE (op) == FLOAT
+ && SCALAR_FLOAT_MODE_P (GET_MODE (op))
&& ((unsigned)significand_size (GET_MODE (op))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
+ >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
- num_sign_bit_copies (XEXP (op, 0),
GET_MODE (XEXP (op, 0)))))))
return simplify_gen_unary (GET_CODE (op), mode,
/* If operand is something known to be positive, ignore the ABS. */
if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
- || ((GET_MODE_BITSIZE (GET_MODE (op))
- <= HOST_BITS_PER_WIDE_INT)
- && ((nonzero_bits (op, GET_MODE (op))
- & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
- == 0)))
+ || val_signbit_known_clear_p (GET_MODE (op),
+ nonzero_bits (op, GET_MODE (op))))
return op;
/* If operand is known to be only -1 or 0, convert ABS to NEG. */
- if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
+ if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
return gen_rtx_NEG (mode, op);
break;
&& GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
return XEXP (op, 0);
+ /* Extending a widening multiplication should be canonicalized to
+ a wider widening multiplication. */
+ if (GET_CODE (op) == MULT)
+ {
+ rtx lhs = XEXP (op, 0);
+ rtx rhs = XEXP (op, 1);
+ enum rtx_code lcode = GET_CODE (lhs);
+ enum rtx_code rcode = GET_CODE (rhs);
+
+ /* Widening multiplies usually extend both operands, but sometimes
+ they use a shift to extract a portion of a register. */
+ if ((lcode == SIGN_EXTEND
+ || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
+ && (rcode == SIGN_EXTEND
+ || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
+ {
+ enum machine_mode lmode = GET_MODE (lhs);
+ enum machine_mode rmode = GET_MODE (rhs);
+ int bits;
+
+ if (lcode == ASHIFTRT)
+ /* Number of bits not shifted off the end. */
+ bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
+ else /* lcode == SIGN_EXTEND */
+ /* Size of inner mode. */
+ bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
+
+ if (rcode == ASHIFTRT)
+ bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
+ else /* rcode == SIGN_EXTEND */
+ bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
+
+ /* We can only widen multiplies if the result is mathematically
+ equivalent. I.e. if overflow was impossible. */
+ if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
+ return simplify_gen_binary
+ (MULT, mode,
+ simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
+ simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
+ }
+ }
+
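/* Worked example (standalone C): each 16-bit operand contributes 16
   significant bits, and 16 + 16 <= 32, so the 32-bit product cannot
   overflow and the extension can be pushed into the operands, e.g.
   (sign_extend:DI (mult:SI (sign_extend:SI x) (sign_extend:SI y)))
   == (mult:DI (sign_extend:DI x) (sign_extend:DI y)).  */
static long long
widen_smul_demo (short x, short y)
{
  return (long long) ((int) x * (int) y);  /* never overflows in SImode */
}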
/* Check for a sign extension of a subreg of a promoted
variable, where the promotion is sign-extended, and the
target mode is the same as the variable's promotion. */
if (GET_CODE (op) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op)
&& ! SUBREG_PROMOTED_UNSIGNED_P (op)
- && GET_MODE (XEXP (op, 0)) == mode)
- return XEXP (op, 0);
+ && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
+ return rtl_hooks.gen_lowpart_no_emit (mode, op);
+
+ /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
+ (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
+ if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
+ {
+ gcc_assert (GET_MODE_BITSIZE (mode)
+ > GET_MODE_BITSIZE (GET_MODE (op)));
+ return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+ }
+
+ /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
+ is (sign_extend:M (subreg:O <X>)) if there is a mode O with
+ GET_MODE_BITSIZE (N) - I bits.
+ (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
+ is similarly (zero_extend:M (subreg:O <X>)). */
+ if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
+ && GET_CODE (XEXP (op, 0)) == ASHIFT
+ && CONST_INT_P (XEXP (op, 1))
+ && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
+ && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
+ {
+ enum machine_mode tmode
+ = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+ - INTVAL (XEXP (op, 1)), MODE_INT, 1);
+ gcc_assert (GET_MODE_BITSIZE (mode)
+ > GET_MODE_BITSIZE (GET_MODE (op)));
+ if (tmode != BLKmode)
+ {
+ rtx inner =
+ rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
+ return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
+ ? SIGN_EXTEND : ZERO_EXTEND,
+ mode, inner, tmode);
+ }
+ }
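/* Worked example (standalone C, 32-bit int): the recognized pair
   (ashiftrt:SI (ashift:SI x 24) 24) keeps only the low byte and
   sign-extends it, i.e. (sign_extend:SI (subreg:QI x)).  */
static int
sext_low_byte_demo (int x)
{
  return (int) (signed char) x;
}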
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
- if (! POINTERS_EXTEND_UNSIGNED
+ /* As we do not know which address space the pointer is referring to,
+ we can do this only if the target does not support different pointer
+ or address modes depending on the address space. */
+ if (target_default_pointer_address_modes_p ()
+ && ! POINTERS_EXTEND_UNSIGNED
&& mode == Pmode && GET_MODE (op) == ptr_mode
&& (CONSTANT_P (op)
|| (GET_CODE (op) == SUBREG
if (GET_CODE (op) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op)
&& SUBREG_PROMOTED_UNSIGNED_P (op) > 0
- && GET_MODE (XEXP (op, 0)) == mode)
- return XEXP (op, 0);
+ && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
+ return rtl_hooks.gen_lowpart_no_emit (mode, op);
+
+ /* Extending a widening multiplication should be canonicalized to
+ a wider widening multiplication. */
+ if (GET_CODE (op) == MULT)
+ {
+ rtx lhs = XEXP (op, 0);
+ rtx rhs = XEXP (op, 1);
+ enum rtx_code lcode = GET_CODE (lhs);
+ enum rtx_code rcode = GET_CODE (rhs);
+
+ /* Widening multiplies usually extend both operands, but sometimes
+ they use a shift to extract a portion of a register. */
+ if ((lcode == ZERO_EXTEND
+ || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
+ && (rcode == ZERO_EXTEND
+ || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
+ {
+ enum machine_mode lmode = GET_MODE (lhs);
+ enum machine_mode rmode = GET_MODE (rhs);
+ int bits;
+
+ if (lcode == LSHIFTRT)
+ /* Number of bits not shifted off the end. */
+ bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
+ else /* lcode == ZERO_EXTEND */
+ /* Size of inner mode. */
+ bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
+
+ if (rcode == LSHIFTRT)
+ bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
+ else /* rcode == ZERO_EXTEND */
+ bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
+
+ /* We can only widen multiplies if the result is mathematically
+ equivalent. I.e. if overflow was impossible. */
+ if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
+ return simplify_gen_binary
+ (MULT, mode,
+ simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
+ simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
+ }
+ }
+
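/* Unsigned counterpart of the worked example in the SIGN_EXTEND case
   (standalone C): 16 + 16 <= 32, so the unsigned 32-bit product of two
   zero-extended 16-bit values cannot overflow either.  */
static unsigned long long
widen_umul_demo (unsigned short x, unsigned short y)
{
  return (unsigned long long) ((unsigned int) x * (unsigned int) y);
}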
+ /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
+ if (GET_CODE (op) == ZERO_EXTEND)
+ return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+
+ /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
+ is (zero_extend:M (subreg:O <X>)) if there is a mode O with
+ GET_MODE_BITSIZE (N) - I bits. */
+ if (GET_CODE (op) == LSHIFTRT
+ && GET_CODE (XEXP (op, 0)) == ASHIFT
+ && CONST_INT_P (XEXP (op, 1))
+ && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
+ && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
+ {
+ enum machine_mode tmode
+ = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+ - INTVAL (XEXP (op, 1)), MODE_INT, 1);
+ if (tmode != BLKmode)
+ {
+ rtx inner =
+ rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
+ return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
+ }
+ }
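/* Worked example (standalone C, 32-bit unsigned): the pair
   (lshiftrt:SI (ashift:SI x 24) 24) isolates the low byte, i.e.
   (zero_extend:SI (subreg:QI x)).  */
static unsigned
zext_low_byte_demo (unsigned x)
{
  return x & 0xffu;
}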
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
- if (POINTERS_EXTEND_UNSIGNED > 0
+ /* As we do not know which address space the pointer is referring to,
+ we can do this only if the target does not support different pointer
+ or address modes depending on the address space. */
+ if (target_default_pointer_address_modes_p ()
+ && POINTERS_EXTEND_UNSIGNED > 0
&& mode == Pmode && GET_MODE (op) == ptr_mode
&& (CONSTANT_P (op)
|| (GET_CODE (op) == SUBREG
default:
break;
}
-
+
return 0;
}
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
rtx op, enum machine_mode op_mode)
{
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
+ unsigned int op_width = GET_MODE_PRECISION (op_mode);
if (code == VEC_DUPLICATE)
{
gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
(GET_MODE (op)));
}
- if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
+ if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
|| GET_CODE (op) == CONST_VECTOR)
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
such as FIX. At some point, this should be simplified. */
if (code == FLOAT && GET_MODE (op) == VOIDmode
- && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
+ && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
{
HOST_WIDE_INT hv, lv;
REAL_VALUE_TYPE d;
- if (GET_CODE (op) == CONST_INT)
+ if (CONST_INT_P (op))
lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
else
lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
}
else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
&& (GET_CODE (op) == CONST_DOUBLE
- || GET_CODE (op) == CONST_INT))
+ || CONST_INT_P (op)))
{
HOST_WIDE_INT hv, lv;
REAL_VALUE_TYPE d;
- if (GET_CODE (op) == CONST_INT)
+ if (CONST_INT_P (op))
lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
else
lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
- if (op_mode == VOIDmode)
- {
- /* We don't know how to interpret negative-looking numbers in
- this case, so don't try to fold those. */
- if (hv < 0)
- return 0;
- }
- else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
- ;
- else
+ if (op_mode == VOIDmode
+ || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
+ /* We should never get a negative number. */
+ gcc_assert (hv >= 0);
+ else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
hv = 0, lv &= GET_MODE_MASK (op_mode);
REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
}
- if (GET_CODE (op) == CONST_INT
+ if (CONST_INT_P (op)
&& width <= HOST_BITS_PER_WIDE_INT && width > 0)
{
HOST_WIDE_INT arg0 = INTVAL (op);
break;
case FFS:
- /* Don't use ffs here. Instead, get low order bit and then its
- number. If arg0 is zero, this will return 0, as desired. */
arg0 &= GET_MODE_MASK (mode);
- val = exact_log2 (arg0 & (- arg0)) + 1;
+ val = ffs_hwi (arg0);
break;
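/* Hedged reference model of the ffs_hwi contract relied on above: the
   1-based index of the least significant set bit, 0 for zero.  */
static int
ffs_ref (unsigned long long x)
{
  int n = 1;
  if (x == 0)
    return 0;
  while ((x & 1) == 0)
    n++, x >>= 1;
  return n;  /* ffs_ref (8) == 4, ffs_ref (0) == 0 */
}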
case CLZ:
if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
;
else
- val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
+ val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
+ break;
+
+ case CLRSB:
+ arg0 &= GET_MODE_MASK (mode);
+ if (arg0 == 0)
+ val = GET_MODE_PRECISION (mode) - 1;
+ else if (arg0 >= 0)
+ val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
+ else if (arg0 < 0)
+ val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
break;
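/* Worked example for CLRSB in an 8-bit mode (standalone C): it counts
   the redundant leading sign-bit copies.  0x03 is 00000011, whose top
   six bits repeat the (zero) sign bit, so clrsb == 5; the formula above
   likewise gives 8 - floor_log2 (3) - 2 == 5.  */
static int
clrsb8_ref (unsigned char x)
{
  int sign = (x >> 7) & 1, n = 0, i;
  for (i = 6; i >= 0 && (((x >> i) & 1) == sign); i--)
    n++;
  return n;  /* clrsb8_ref (0x03) == 5, clrsb8_ref (0x00) == 7 */
}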
case CTZ:
/* Even if the value at zero is undefined, we have to come
up with some replacement. Seems good enough. */
if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
- val = GET_MODE_BITSIZE (mode);
+ val = GET_MODE_PRECISION (mode);
}
else
- val = exact_log2 (arg0 & -arg0);
+ val = ctz_hwi (arg0);
break;
case POPCOUNT:
/* When zero-extending a CONST_INT, we need to know its
original mode. */
gcc_assert (op_mode != VOIDmode);
- if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
+ if (op_width == HOST_BITS_PER_WIDE_INT)
{
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- gcc_assert (width == GET_MODE_BITSIZE (op_mode));
+ gcc_assert (width == op_width);
val = arg0;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
- val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
+ val = arg0 & GET_MODE_MASK (op_mode);
else
return 0;
break;
case SIGN_EXTEND:
if (op_mode == VOIDmode)
op_mode = mode;
- if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
+ op_width = GET_MODE_PRECISION (op_mode);
+ if (op_width == HOST_BITS_PER_WIDE_INT)
{
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- gcc_assert (width == GET_MODE_BITSIZE (op_mode));
+ gcc_assert (width == op_width);
val = arg0;
}
- else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
+ else if (op_width < HOST_BITS_PER_WIDE_INT)
{
- val
- = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
- if (val
- & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
- val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+ val = arg0 & GET_MODE_MASK (op_mode);
+ if (val_signbit_known_set_p (op_mode, val))
+ val |= ~GET_MODE_MASK (op_mode);
}
else
return 0;
case SS_TRUNCATE:
case US_TRUNCATE:
case SS_NEG:
+ case US_NEG:
+ case SS_ABS:
return 0;
default:
/* We can do some operations on integer CONST_DOUBLEs. Also allow
for a DImode operation on a CONST_INT. */
else if (GET_MODE (op) == VOIDmode
- && width <= HOST_BITS_PER_WIDE_INT * 2
+ && width <= HOST_BITS_PER_DOUBLE_INT
&& (GET_CODE (op) == CONST_DOUBLE
- || GET_CODE (op) == CONST_INT))
+ || CONST_INT_P (op)))
{
unsigned HOST_WIDE_INT l1, lv;
HOST_WIDE_INT h1, hv;
case FFS:
hv = 0;
- if (l1 == 0)
- {
- if (h1 == 0)
- lv = 0;
- else
- lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
- }
+ if (l1 != 0)
+ lv = ffs_hwi (l1);
+ else if (h1 != 0)
+ lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
else
- lv = exact_log2 (l1 & -l1) + 1;
+ lv = 0;
break;
case CLZ:
hv = 0;
if (h1 != 0)
- lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
+ lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
- HOST_BITS_PER_WIDE_INT;
else if (l1 != 0)
- lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
+ lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
- lv = GET_MODE_BITSIZE (mode);
+ lv = GET_MODE_PRECISION (mode);
break;
case CTZ:
hv = 0;
if (l1 != 0)
- lv = exact_log2 (l1 & -l1);
+ lv = ctz_hwi (l1);
else if (h1 != 0)
- lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
+ lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
- lv = GET_MODE_BITSIZE (mode);
+ lv = GET_MODE_PRECISION (mode);
break;
case POPCOUNT:
case ZERO_EXTEND:
gcc_assert (op_mode != VOIDmode);
- if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
+ if (op_width > HOST_BITS_PER_WIDE_INT)
return 0;
hv = 0;
case SIGN_EXTEND:
if (op_mode == VOIDmode
- || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
+ || op_width > HOST_BITS_PER_WIDE_INT)
return 0;
else
{
lv = l1 & GET_MODE_MASK (op_mode);
- if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
- && (lv & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
- lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+ if (val_signbit_known_set_p (op_mode, lv))
+ lv |= ~GET_MODE_MASK (op_mode);
hv = HWI_SIGN_EXTEND (lv);
}
}
else if (GET_CODE (op) == CONST_DOUBLE
- && SCALAR_FLOAT_MODE_P (mode))
+ && SCALAR_FLOAT_MODE_P (mode)
+ && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
{
REAL_VALUE_TYPE d, t;
REAL_VALUE_FROM_CONST_DOUBLE (d, op);
d = t;
break;
case ABS:
- d = REAL_VALUE_ABS (d);
+ d = real_value_abs (&d);
break;
case NEG:
- d = REAL_VALUE_NEGATE (d);
+ d = real_value_negate (&d);
break;
case FLOAT_TRUNCATE:
d = real_value_truncate (mode, d);
break;
case FLOAT_EXTEND:
- /* All this does is change the mode. */
+ /* All this does is change the mode, unless changing
+ mode class. */
+ if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
+ real_convert (&d, mode, &d);
break;
case FIX:
real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
else if (GET_CODE (op) == CONST_DOUBLE
&& SCALAR_FLOAT_MODE_P (GET_MODE (op))
&& GET_MODE_CLASS (mode) == MODE_INT
- && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
+ && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
{
/* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
operators are intentionally left unspecified (to ease implementation
/* Test against the signed lower bound. */
if (width > HOST_BITS_PER_WIDE_INT)
{
- th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
+ th = (unsigned HOST_WIDE_INT) (-1)
+ << (width - HOST_BITS_PER_WIDE_INT - 1);
tl = 0;
}
else
{
th = -1;
- tl = (HOST_WIDE_INT) -1 << (width - 1);
+ tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
}
real_from_integer (&t, VOIDmode, tl, th, 0);
if (REAL_VALUES_LESS (x, t))
return const0_rtx;
/* Test against the unsigned upper bound. */
- if (width == 2*HOST_BITS_PER_WIDE_INT)
+ if (width == HOST_BITS_PER_DOUBLE_INT)
{
th = -1;
tl = -1;
}
/* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
- tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
- ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
- : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
+ tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
if (tem != 0)
return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
/* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
- tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
- ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
- : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
+ tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
if (tem != 0)
return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
}
{
rtx tem, reversed, opleft, opright;
HOST_WIDE_INT val;
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
/* Even if we can't compute a constant result,
there are some cases worth simplifying. */
to CONST_INT since overflow won't be computed properly if wider
than HOST_BITS_PER_WIDE_INT. */
- if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
- && GET_CODE (op1) == CONST_INT)
- return plus_constant (op0, INTVAL (op1));
- else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
- && GET_CODE (op0) == CONST_INT)
- return plus_constant (op1, INTVAL (op0));
+ if ((GET_CODE (op0) == CONST
+ || GET_CODE (op0) == SYMBOL_REF
+ || GET_CODE (op0) == LABEL_REF)
+ && CONST_INT_P (op1))
+ return plus_constant (mode, op0, INTVAL (op1));
+ else if ((GET_CODE (op1) == CONST
+ || GET_CODE (op1) == SYMBOL_REF
+ || GET_CODE (op1) == LABEL_REF)
+ && CONST_INT_P (op0))
+ return plus_constant (mode, op1, INTVAL (op0));
/* See if this is something like X * C - X or vice versa or
if the multiplication is written as a shift. If so, we can
if (SCALAR_INT_MODE_P (mode))
{
- HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
- unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
+ double_int coeff0, coeff1;
rtx lhs = op0, rhs = op1;
+ coeff0 = double_int_one;
+ coeff1 = double_int_one;
+
if (GET_CODE (lhs) == NEG)
{
- coeff0l = -1;
- coeff0h = -1;
+ coeff0 = double_int_minus_one;
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == MULT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (lhs, 1)))
{
- coeff0l = INTVAL (XEXP (lhs, 1));
- coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
+ coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT
- && INTVAL (XEXP (lhs, 1)) >= 0
+ && CONST_INT_P (XEXP (lhs, 1))
+ && INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
- coeff0h = 0;
+ coeff0 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
{
- coeff1l = -1;
- coeff1h = -1;
+ coeff1 = double_int_minus_one;
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == MULT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (rhs, 1)))
{
- coeff1l = INTVAL (XEXP (rhs, 1));
- coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
+ coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (rhs, 1))
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
- coeff1h = 0;
+ coeff1 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
{
rtx orig = gen_rtx_PLUS (mode, op0, op1);
rtx coeff;
- unsigned HOST_WIDE_INT l;
- HOST_WIDE_INT h;
+ double_int val;
+ bool speed = optimize_function_for_speed_p (cfun);
- add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
- coeff = immed_double_const (l, h, mode);
+ val = double_int_add (coeff0, coeff1);
+ coeff = immed_double_int_const (val, mode);
tem = simplify_gen_binary (MULT, mode, lhs, coeff);
- return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
+ return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
? tem : 0;
}
}
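/* Worked example (standalone C): for (plus (mult x 4) (ashift x 1)) the
   coefficients are 4 and 1 << 1, double_int_add folds them to 6, and
   the rewrite (mult x 6) is kept only if set_src_cost does not judge it
   more expensive than the original expression.  */
static int
coeff_merge_demo (unsigned x)
{
  return (x * 4 + (x << 1)) == (x * 6);  /* always true */
}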
/* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
- if ((GET_CODE (op1) == CONST_INT
+ if ((CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE)
&& GET_CODE (op0) == XOR
- && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (CONST_INT_P (XEXP (op0, 1))
|| GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
&& mode_signbit_p (mode, op1))
return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
return tem;
/* Reassociate floating point addition only when the user
- specifies unsafe math optimizations. */
+ specifies associative math operations. */
if (FLOAT_MODE_P (mode)
- && flag_unsafe_math_optimizations)
+ && flag_associative_math)
{
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
break;
case COMPARE:
-#ifdef HAVE_cc0
- /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
- using cc0, in which case we want to leave it as a COMPARE
- so we can distinguish it from a register-register-copy.
-
- In IEEE floating point, x-0 is not the same as x. */
-
- if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
- || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
- && trueop1 == CONST0_RTX (mode))
- return op0;
-#endif
-
/* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
|| (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
case MINUS:
/* We can't assume x-x is 0 even with non-IEEE floating point,
but since it is zero except in very strange circumstances, we
- will treat it as zero with -funsafe-math-optimizations and
- -ffinite-math-only. */
+ will treat it as zero with -ffinite-math-only. */
if (rtx_equal_p (trueop0, trueop1)
&& ! side_effects_p (op0)
- && (! FLOAT_MODE_P (mode)
- || (flag_unsafe_math_optimizations
- && !HONOR_NANS (mode)
- && !HONOR_INFINITIES (mode))))
+ && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
return CONST0_RTX (mode);
/* Change subtraction from zero into negation. (0 - x) is the
if (SCALAR_INT_MODE_P (mode))
{
- HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
- unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
+ double_int coeff0, negcoeff1;
rtx lhs = op0, rhs = op1;
+ coeff0 = double_int_one;
+ negcoeff1 = double_int_minus_one;
+
if (GET_CODE (lhs) == NEG)
{
- coeff0l = -1;
- coeff0h = -1;
+ coeff0 = double_int_minus_one;
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == MULT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (lhs, 1)))
{
- coeff0l = INTVAL (XEXP (lhs, 1));
- coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
+ coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (lhs, 1))
&& INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
- coeff0h = 0;
+ coeff0 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
{
- negcoeff1l = 1;
- negcoeff1h = 0;
+ negcoeff1 = double_int_one;
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == MULT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (rhs, 1)))
{
- negcoeff1l = -INTVAL (XEXP (rhs, 1));
- negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
+ negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (rhs, 1))
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
- negcoeff1h = -1;
+ negcoeff1 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (rhs, 1)));
+ negcoeff1 = double_int_neg (negcoeff1);
rhs = XEXP (rhs, 0);
}
{
rtx orig = gen_rtx_MINUS (mode, op0, op1);
rtx coeff;
- unsigned HOST_WIDE_INT l;
- HOST_WIDE_INT h;
+ double_int val;
+ bool speed = optimize_function_for_speed_p (cfun);
- add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
- coeff = immed_double_const (l, h, mode);
+ val = double_int_add (coeff0, negcoeff1);
+ coeff = immed_double_int_const (val, mode);
tem = simplify_gen_binary (MULT, mode, lhs, coeff);
- return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
+ return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
? tem : 0;
}
}
/* (-x - c) may be simplified as (-c - x). */
if (GET_CODE (op0) == NEG
- && (GET_CODE (op1) == CONST_INT
+ && (CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE))
{
tem = simplify_unary_operation (NEG, mode, op1, mode);
}
/* Don't let a relocatable value get a negative coeff. */
- if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
+ if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
return simplify_gen_binary (PLUS, mode,
op0,
neg_const_int (mode, op1));
if (trueop1 == constm1_rtx)
return simplify_gen_unary (NEG, mode, op0, mode);
+ if (GET_CODE (op0) == NEG)
+ {
+ rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
+ /* If op1 is a MULT as well and simplify_unary_operation
+ just moved the NEG to the second operand, simplify_gen_binary
+ below could, through simplify_associative_operation, move
+ the NEG around again and recurse endlessly. */
+ if (temp
+ && GET_CODE (op1) == MULT
+ && GET_CODE (temp) == MULT
+ && XEXP (op1, 0) == XEXP (temp, 0)
+ && GET_CODE (XEXP (temp, 1)) == NEG
+ && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
+ temp = NULL_RTX;
+ if (temp)
+ return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
+ }
+ if (GET_CODE (op1) == NEG)
+ {
+ rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
+ /* If op0 is a MULT as well and simplify_unary_operation
+ just moved the NEG to the second operand, simplify_gen_binary
+ below could, through simplify_associative_operation, move
+ the NEG around again and recurse endlessly. */
+ if (temp
+ && GET_CODE (op0) == MULT
+ && GET_CODE (temp) == MULT
+ && XEXP (op0, 0) == XEXP (temp, 0)
+ && GET_CODE (XEXP (temp, 1)) == NEG
+ && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
+ temp = NULL_RTX;
+ if (temp)
+ return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
+ }
+
/* Maybe simplify x * 0 to 0. The reduction is not valid if
x is NaN, since x * 0 is then also NaN. Nor is it valid
when the mode has signed zeros, since multiplying a negative
/* Convert multiply by constant power of two into shift unless
we are still generating RTL. This test is a kludge. */
- if (GET_CODE (trueop1) == CONST_INT
- && (val = exact_log2 (INTVAL (trueop1))) >= 0
+ if (CONST_INT_P (trueop1)
+ && (val = exact_log2 (UINTVAL (trueop1))) >= 0
/* If the mode is larger than the host word size, and the
uppermost bit is set, then this isn't a power of two due
to implicit sign extension. */
|| GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
&& GET_MODE (op0) == mode
&& CONST_DOUBLE_LOW (trueop1) == 0
- && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
+ && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
+ && (val < HOST_BITS_PER_DOUBLE_INT - 1
+ || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
return simplify_gen_binary (ASHIFT, mode, op0,
GEN_INT (val + HOST_BITS_PER_WIDE_INT));
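/* Worked example (standalone C): multiplying by an exact power of two
   is a left shift, matching the ASHIFT rewrite above; exact_log2 (8)
   is 3.  */
static int
mult_pow2_demo (unsigned x)
{
  return (x * 8u) == (x << 3);
}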
/* x*2 is x+x and x*(-1) is -x */
if (GET_CODE (trueop1) == CONST_DOUBLE
&& SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
+ && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
&& GET_MODE (op0) == mode)
{
REAL_VALUE_TYPE d;
break;
case IOR:
- if (trueop1 == const0_rtx)
+ if (trueop1 == CONST0_RTX (mode))
return op0;
- if (GET_CODE (trueop1) == CONST_INT
- && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
- == GET_MODE_MASK (mode)))
+ if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
return op0;
return constm1_rtx;
/* (ior A C) is C if all bits of A that might be nonzero are on in C. */
- if (GET_CODE (op1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
+ if (CONST_INT_P (op1)
+ && HWI_COMPUTABLE_MODE_P (mode)
+ && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
return op1;
-
+
/* Canonicalize (X & C1) | C2. */
if (GET_CODE (op0) == AND
- && GET_CODE (trueop1) == CONST_INT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ && CONST_INT_P (trueop1)
+ && CONST_INT_P (XEXP (op0, 1)))
{
HOST_WIDE_INT mask = GET_MODE_MASK (mode);
HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
&& rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
- && GET_CODE (XEXP (opleft, 1)) == CONST_INT
- && GET_CODE (XEXP (opright, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (opleft, 1))
+ && CONST_INT_P (XEXP (opright, 1))
&& (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
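/* Worked example (standalone C, 32-bit unsigned, assuming 0 < n < 32):
   shifts whose counts sum to the precision compose a rotation, which is
   what the (ior (ashift ...) (lshiftrt ...)) -> (rotate ...) rewrite
   above recognizes.  */
static unsigned
rotl_demo (unsigned x, unsigned n)
{
  return (x << n) | (x >> (32 - n));
}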
/* Same, but for ashift that has been "simplified" to a wider mode
< GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
&& rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
SUBREG_REG (XEXP (opright, 0)))
- && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
- && GET_CODE (XEXP (opright, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
+ && CONST_INT_P (XEXP (opright, 1))
&& (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
return gen_rtx_ROTATE (mode, XEXP (opright, 0),
XEXP (SUBREG_REG (opleft), 1));
/* If we have (ior (and X C1) C2), simplify this by making
C1 as small as possible if C1 actually changes. */
- if (GET_CODE (op1) == CONST_INT
- && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ if (CONST_INT_P (op1)
+ && (HWI_COMPUTABLE_MODE_P (mode)
|| INTVAL (op1) > 0)
&& GET_CODE (op0) == AND
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
- && GET_CODE (op1) == CONST_INT
- && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
+ && CONST_INT_P (XEXP (op0, 1))
+ && CONST_INT_P (op1)
+ && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
return simplify_gen_binary (IOR, mode,
simplify_gen_binary
(AND, mode, XEXP (op0, 0),
- GEN_INT (INTVAL (XEXP (op0, 1))
- & ~INTVAL (op1))),
+ GEN_INT (UINTVAL (XEXP (op0, 1))
+ & ~UINTVAL (op1))),
op1);
/* If OP0 is (ashiftrt (plus ...) C), it might actually be
the PLUS does not affect any of the bits in OP1: then we can do
the IOR as a PLUS and we can associate. This is valid if OP1
can be safely shifted left C bits. */
- if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
+ if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
&& GET_CODE (XEXP (op0, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
+ && CONST_INT_P (XEXP (op0, 1))
&& INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
{
int count = INTVAL (XEXP (op0, 1));
if (mask >> count == INTVAL (trueop1)
&& (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
return simplify_gen_binary (ASHIFTRT, mode,
- plus_constant (XEXP (op0, 0), mask),
+ plus_constant (mode, XEXP (op0, 0),
+ mask),
XEXP (op0, 1));
}
break;
case XOR:
- if (trueop1 == const0_rtx)
+ if (trueop1 == CONST0_RTX (mode))
return op0;
- if (GET_CODE (trueop1) == CONST_INT
- && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
- == GET_MODE_MASK (mode)))
+ if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
return simplify_gen_unary (NOT, mode, op0, mode);
if (rtx_equal_p (trueop0, trueop1)
&& ! side_effects_p (op0)
return CONST0_RTX (mode);
/* Canonicalize XOR of the most significant bit to PLUS. */
- if ((GET_CODE (op1) == CONST_INT
+ if ((CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE)
&& mode_signbit_p (mode, op1))
return simplify_gen_binary (PLUS, mode, op0, op1);
/* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
- if ((GET_CODE (op1) == CONST_INT
+ if ((CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE)
&& GET_CODE (op0) == PLUS
- && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (CONST_INT_P (XEXP (op0, 1))
|| GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
&& mode_signbit_p (mode, XEXP (op0, 1)))
return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
convert them into an IOR. This helps to detect rotation encoded
using those methods and possibly other simplifications. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ if (HWI_COMPUTABLE_MODE_P (mode)
&& (nonzero_bits (op0, mode)
& nonzero_bits (op1, mode)) == 0)
return (simplify_gen_binary (IOR, mode, op0, op1));
XEXP (op0, 1), mode),
op1);
+ /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
+ we can transform like this:
+ (A&B)^C == ~(A&B)&C | ~C&(A&B)
+ == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
+ == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
+ Attempt a few simplifications when B and C are both constants. */
+ if (GET_CODE (op0) == AND
+ && CONST_INT_P (op1)
+ && CONST_INT_P (XEXP (op0, 1)))
+ {
+ rtx a = XEXP (op0, 0);
+ rtx b = XEXP (op0, 1);
+ rtx c = op1;
+ HOST_WIDE_INT bval = INTVAL (b);
+ HOST_WIDE_INT cval = INTVAL (c);
+
+ rtx na_c
+ = simplify_binary_operation (AND, mode,
+ simplify_gen_unary (NOT, mode, a, mode),
+ c);
+ if ((~cval & bval) == 0)
+ {
+ /* Try to simplify ~A&C | ~B&C. */
+ if (na_c != NULL_RTX)
+ return simplify_gen_binary (IOR, mode, na_c,
+ GEN_INT (~bval & cval));
+ }
+ else
+ {
+ /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
+ if (na_c == const0_rtx)
+ {
+ rtx a_nc_b = simplify_gen_binary (AND, mode, a,
+ GEN_INT (~cval & bval));
+ return simplify_gen_binary (IOR, mode, a_nc_b,
+ GEN_INT (~bval & cval));
+ }
+ }
+ }
+
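/* Spot check of the identity above with 8-bit values (standalone C):
   (A&B)^C == ~A&C | ~B&C | A&(~C&B) for every A, with B and C the
   arbitrary constants chosen here.  */
static int
xor_and_identity_demo (void)
{
  unsigned a, b = 0xf0, c = 0x3c;
  for (a = 0; a < 256; a++)
    if ((((a & b) ^ c) & 0xff)
        != (((~a & c) | (~b & c) | (a & (~c & b))) & 0xff))
      return 0;
  return 1;  /* always 1 */
}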
/* (xor (comparison foo bar) (const_int 1)) can become the reversed
comparison if STORE_FLAG_VALUE is 1. */
if (STORE_FLAG_VALUE == 1
if (STORE_FLAG_VALUE == 1
&& trueop1 == const1_rtx
&& GET_CODE (op0) == LSHIFTRT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
- && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && CONST_INT_P (XEXP (op0, 1))
+ && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
/* (xor (comparison foo bar) (const_int sign-bit))
when STORE_FLAG_VALUE is the sign bit. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
- == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
+ if (val_signbit_p (mode, STORE_FLAG_VALUE)
&& trueop1 == const_true_rtx
&& COMPARISON_P (op0)
&& (reversed = reversed_comparison (op0, mode)))
return reversed;
- break;
-
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
case AND:
if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
return trueop1;
- /* If we are turning off bits already known off in OP0, we need
- not do an AND. */
- if (GET_CODE (trueop1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
+ if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
return op0;
+ if (HWI_COMPUTABLE_MODE_P (mode))
+ {
+ HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
+ HOST_WIDE_INT nzop1;
+ if (CONST_INT_P (trueop1))
+ {
+ HOST_WIDE_INT val1 = INTVAL (trueop1);
+ /* If we are turning off bits already known off in OP0, we need
+ not do an AND. */
+ if ((nzop0 & ~val1) == 0)
+ return op0;
+ }
+ nzop1 = nonzero_bits (trueop1, mode);
+ /* If we are clearing all the nonzero bits, the result is zero. */
+ if ((nzop1 & nzop0) == 0
+ && !side_effects_p (op0) && !side_effects_p (op1))
+ return CONST0_RTX (mode);
+ }
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
&& GET_MODE_CLASS (mode) != MODE_CC)
return op0;
there are no nonzero bits of C outside of X's mode. */
if ((GET_CODE (op0) == SIGN_EXTEND
|| GET_CODE (op0) == ZERO_EXTEND)
- && GET_CODE (trueop1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && CONST_INT_P (trueop1)
+ && HWI_COMPUTABLE_MODE_P (mode)
&& (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
- & INTVAL (trueop1)) == 0)
+ & UINTVAL (trueop1)) == 0)
{
enum machine_mode imode = GET_MODE (XEXP (op0, 0));
tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
}
+ /* Transform (and (truncate X) C) into (truncate (and X C)). This way
+ we might be able to further simplify the AND with X and potentially
+ remove the truncation altogether. */
+ if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
+ {
+ rtx x = XEXP (op0, 0);
+ enum machine_mode xmode = GET_MODE (x);
+ tem = simplify_gen_binary (AND, xmode, x,
+ gen_int_mode (INTVAL (trueop1), xmode));
+ return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
+ }
+
/* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
if (GET_CODE (op0) == IOR
- && GET_CODE (trueop1) == CONST_INT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ && CONST_INT_P (trueop1)
+ && CONST_INT_P (XEXP (op0, 1)))
{
HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
return simplify_gen_binary (IOR, mode,
((A & N) + B) & M -> (A + B) & M
Similarly if (N & M) == 0,
((A | N) + B) & M -> (A + B) & M
- and for - instead of + and/or ^ instead of |. */
- if (GET_CODE (trueop1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ~INTVAL (trueop1)
- && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
+ and for - instead of + and/or ^ instead of |.
+ Also, if (N & M) == 0, then
+ (A +- N) & M -> A & M. */
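+ /* For example, with M == 0xff and N == 0x100, (N & M) == 0 and
+ adding or subtracting 0x100 cannot change any bit covered by the
+ mask, so ((A + 0x100) & 0xff) is just (A & 0xff). */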
+ if (CONST_INT_P (trueop1)
+ && HWI_COMPUTABLE_MODE_P (mode)
+ && ~UINTVAL (trueop1)
+ && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
&& (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
{
rtx pmop[2];
pmop[0] = XEXP (op0, 0);
pmop[1] = XEXP (op0, 1);
+ if (CONST_INT_P (pmop[1])
+ && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
+ return simplify_gen_binary (AND, mode, pmop[0], op1);
+
for (which = 0; which < 2; which++)
{
tem = pmop[which];
switch (GET_CODE (tem))
{
case AND:
- if (GET_CODE (XEXP (tem, 1)) == CONST_INT
- && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
- == INTVAL (trueop1))
+ if (CONST_INT_P (XEXP (tem, 1))
+ && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
+ == UINTVAL (trueop1))
pmop[which] = XEXP (tem, 0);
break;
case IOR:
case XOR:
- if (GET_CODE (XEXP (tem, 1)) == CONST_INT
- && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
+ if (CONST_INT_P (XEXP (tem, 1))
+ && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
pmop[which] = XEXP (tem, 0);
break;
default:
return simplify_gen_binary (code, mode, tem, op1);
}
}
+
+ /* (and X (ior (not X) Y)) -> (and X Y) */
+ if (GET_CODE (op1) == IOR
+ && GET_CODE (XEXP (op1, 0)) == NOT
+ && op0 == XEXP (XEXP (op1, 0), 0))
+ return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
+
+ /* (and (ior (not X) Y) X) -> (and X Y) */
+ if (GET_CODE (op0) == IOR
+ && GET_CODE (XEXP (op0, 0)) == NOT
+ && op1 == XEXP (XEXP (op0, 0), 0))
+ return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
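+ /* Both follow by distributing AND over IOR:
+ X & (~X | Y) == (X & ~X) | (X & Y) == X & Y. */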
+
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
if (trueop1 == CONST1_RTX (mode))
return rtl_hooks.gen_lowpart_no_emit (mode, op0);
/* Convert divide by power of two into shift. */
- if (GET_CODE (trueop1) == CONST_INT
- && (val = exact_log2 (INTVAL (trueop1))) > 0)
+ if (CONST_INT_P (trueop1)
+ && (val = exact_log2 (UINTVAL (trueop1))) > 0)
return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
break;
return simplify_gen_unary (NEG, mode, op0, mode);
/* Change FP division by a constant into multiplication.
- Only do this with -funsafe-math-optimizations. */
- if (flag_unsafe_math_optimizations
+ Only do this with -freciprocal-math. */
+ if (flag_reciprocal_math
&& !REAL_VALUES_EQUAL (d, dconst0))
{
REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
}
}
}
- else
+ else if (SCALAR_INT_MODE_P (mode))
{
/* 0/x is 0 (or x&0 if x has side-effects). */
- if (trueop0 == CONST0_RTX (mode))
+ if (trueop0 == CONST0_RTX (mode)
+ && !cfun->can_throw_non_call_exceptions)
{
if (side_effects_p (op1))
return simplify_gen_binary (AND, mode, op1, trueop0);
return CONST0_RTX (mode);
}
/* Implement modulus by power of two as AND. */
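+ /* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)). */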
- if (GET_CODE (trueop1) == CONST_INT
- && exact_log2 (INTVAL (trueop1)) > 0)
+ if (CONST_INT_P (trueop1)
+ && exact_log2 (UINTVAL (trueop1)) > 0)
return simplify_gen_binary (AND, mode, op0,
GEN_INT (INTVAL (op1) - 1));
break;
if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
return op0;
/* Rotating ~0 always results in ~0. */
- if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
- && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
+ if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
+ && UINTVAL (trueop0) == GET_MODE_MASK (mode)
&& ! side_effects_p (op1))
return op0;
+ canonicalize_shift:
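+ /* E.g. with SHIFT_COUNT_TRUNCATED on a 32-bit mode, (ashift x
+ (const_int 33)) is canonicalized to (ashift x (const_int 1)). */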
+ if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
+ {
+ val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
+ if (val != INTVAL (op1))
+ return simplify_gen_binary (code, mode, op0, GEN_INT (val));
+ }
break;
case ASHIFT:
case SS_ASHIFT:
+ case US_ASHIFT:
if (trueop1 == CONST0_RTX (mode))
return op0;
if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
return op0;
- break;
+ goto canonicalize_shift;
case LSHIFTRT:
if (trueop1 == CONST0_RTX (mode))
return op0;
/* Optimize (lshiftrt (clz X) C) as (eq X 0). */
if (GET_CODE (op0) == CLZ
- && GET_CODE (trueop1) == CONST_INT
+ && CONST_INT_P (trueop1)
&& STORE_FLAG_VALUE == 1
&& INTVAL (trueop1) < (HOST_WIDE_INT)width)
{
unsigned HOST_WIDE_INT zero_val = 0;
if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
- && zero_val == GET_MODE_BITSIZE (imode)
+ && zero_val == GET_MODE_PRECISION (imode)
&& INTVAL (trueop1) == exact_log2 (zero_val))
return simplify_gen_relational (EQ, mode, imode,
XEXP (op0, 0), const0_rtx);
}
- break;
+ goto canonicalize_shift;
case SMIN:
if (width <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (trueop1) == CONST_INT
- && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
+ && mode_signbit_p (mode, trueop1)
&& ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
case SMAX:
if (width <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (trueop1) == CONST_INT
- && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
- == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
+ && CONST_INT_P (trueop1)
+ && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
&& ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
case US_PLUS:
case SS_MINUS:
case US_MINUS:
+ case SS_MULT:
+ case US_MULT:
+ case SS_DIV:
+ case US_DIV:
/* ??? There are simplifications that can be done. */
return 0;
gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
gcc_assert (GET_CODE (trueop1) == PARALLEL);
gcc_assert (XVECLEN (trueop1, 0) == 1);
- gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
+ gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
if (GET_CODE (trueop0) == CONST_VECTOR)
return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
(trueop1, 0, 0)));
+
+ /* Extract a scalar element from a nested VEC_SELECT expression
+ (with an optional nested VEC_CONCAT expression). Some targets
+ (i386) extract a scalar element from a vector using a chain of
+ nested VEC_SELECT expressions. When the input operand is a memory
+ operand, this operation can be simplified to a simple scalar load
+ from a memory address with an offset. */
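+ /* Worked example (illustrative):
+ (vec_select:SF (vec_select:V2SF x:V4SF (parallel [2 3]))
+ (parallel [1]))
+ picks element 1 of the inner selection, i.e. element 3 of X, and
+ becomes (vec_select:SF x (parallel [3])). */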
+ if (GET_CODE (trueop0) == VEC_SELECT)
+ {
+ rtx op0 = XEXP (trueop0, 0);
+ rtx op1 = XEXP (trueop0, 1);
+
+ enum machine_mode opmode = GET_MODE (op0);
+ int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
+ int n_elts = GET_MODE_SIZE (opmode) / elt_size;
+
+ int i = INTVAL (XVECEXP (trueop1, 0, 0));
+ int elem;
+
+ rtvec vec;
+ rtx tmp_op, tmp;
+
+ gcc_assert (GET_CODE (op1) == PARALLEL);
+ gcc_assert (i < n_elts);
+
+ /* Select the element pointed to by the nested selector. */
+ elem = INTVAL (XVECEXP (op1, 0, i));
+
+ /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
+ if (GET_CODE (op0) == VEC_CONCAT)
+ {
+ rtx op00 = XEXP (op0, 0);
+ rtx op01 = XEXP (op0, 1);
+
+ enum machine_mode mode00, mode01;
+ int n_elts00, n_elts01;
+
+ mode00 = GET_MODE (op00);
+ mode01 = GET_MODE (op01);
+
+ /* Find out number of elements of each operand. */
+ if (VECTOR_MODE_P (mode00))
+ {
+ elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
+ n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
+ }
+ else
+ n_elts00 = 1;
+
+ if (VECTOR_MODE_P (mode01))
+ {
+ elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
+ n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
+ }
+ else
+ n_elts01 = 1;
+
+ gcc_assert (n_elts == n_elts00 + n_elts01);
+
+ /* Select the correct operand of the VEC_CONCAT
+ and adjust the selector. Elements 0 .. n_elts00-1
+ come from the first operand. */
+ if (elem < n_elts00)
+ tmp_op = op00;
+ else
+ {
+ tmp_op = op01;
+ elem -= n_elts00;
+ }
+ }
+ else
+ tmp_op = op0;
+
+ vec = rtvec_alloc (1);
+ RTVEC_ELT (vec, 0) = GEN_INT (elem);
+
+ tmp = gen_rtx_fmt_ee (code, mode,
+ tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
+ return tmp;
+ }
+ if (GET_CODE (trueop0) == VEC_DUPLICATE
+ && GET_MODE (XEXP (trueop0, 0)) == mode)
+ return XEXP (trueop0, 0);
}
else
{
{
rtx x = XVECEXP (trueop1, 0, i);
- gcc_assert (GET_CODE (x) == CONST_INT);
+ gcc_assert (CONST_INT_P (x));
RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
INTVAL (x));
}
return gen_rtx_CONST_VECTOR (mode, v);
}
+
+ /* If we build {a,b} then permute it, build the result directly. */
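+ /* E.g. (vec_select (vec_concat (vec_concat a b) (vec_concat c d))
+ (parallel [2 1])) becomes (vec_concat c b). */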
+ if (XVECLEN (trueop1, 0) == 2
+ && CONST_INT_P (XVECEXP (trueop1, 0, 0))
+ && CONST_INT_P (XVECEXP (trueop1, 0, 1))
+ && GET_CODE (trueop0) == VEC_CONCAT
+ && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
+ && GET_MODE (XEXP (trueop0, 0)) == mode
+ && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
+ && GET_MODE (XEXP (trueop0, 1)) == mode)
+ {
+ unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
+ unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
+ rtx subop0, subop1;
+
+ gcc_assert (i0 < 4 && i1 < 4);
+ subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
+ subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
+
+ return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
+ }
}
if (XVECLEN (trueop1, 0) == 1
- && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
+ && CONST_INT_P (XVECEXP (trueop1, 0, 0))
&& GET_CODE (trueop0) == VEC_CONCAT)
{
rtx vec = trueop0;
gcc_assert (GET_MODE_INNER (mode) == op1_mode);
if ((GET_CODE (trueop0) == CONST_VECTOR
- || GET_CODE (trueop0) == CONST_INT
+ || CONST_INT_P (trueop0)
|| GET_CODE (trueop0) == CONST_DOUBLE)
&& (GET_CODE (trueop1) == CONST_VECTOR
- || GET_CODE (trueop1) == CONST_INT
+ || CONST_INT_P (trueop1)
|| GET_CODE (trueop1) == CONST_DOUBLE))
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
{
HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
HOST_WIDE_INT val;
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
if (VECTOR_MODE_P (mode)
&& code != VEC_CONCAT
if (VECTOR_MODE_P (mode)
&& code == VEC_CONCAT
- && CONSTANT_P (op0) && CONSTANT_P (op1))
+ && (CONST_INT_P (op0)
+ || GET_CODE (op0) == CONST_DOUBLE
+ || GET_CODE (op0) == CONST_FIXED)
+ && (CONST_INT_P (op1)
+ || GET_CODE (op1) == CONST_DOUBLE
+ || GET_CODE (op1) == CONST_FIXED))
{
unsigned n_elts = GET_MODE_NUNITS (mode);
rtvec v = rtvec_alloc (n_elts);
is unable to accurately represent the result. */
if ((flag_rounding_math
- || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
- && !flag_unsafe_math_optimizations))
+ || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
&& (inexact || !real_identical (&result, &value)))
return NULL_RTX;
/* We can fold some multi-word operations. */
if (GET_MODE_CLASS (mode) == MODE_INT
- && width == HOST_BITS_PER_WIDE_INT * 2
- && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
- && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
+ && width == HOST_BITS_PER_DOUBLE_INT
+ && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
+ && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
{
- unsigned HOST_WIDE_INT l1, l2, lv, lt;
- HOST_WIDE_INT h1, h2, hv, ht;
-
- if (GET_CODE (op0) == CONST_DOUBLE)
- l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
- else
- l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
+ double_int o0, o1, res, tmp;
- if (GET_CODE (op1) == CONST_DOUBLE)
- l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
- else
- l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
+ o0 = rtx_to_double_int (op0);
+ o1 = rtx_to_double_int (op1);
switch (code)
{
case MINUS:
/* A - B == A + (-B). */
- neg_double (l2, h2, &lv, &hv);
- l2 = lv, h2 = hv;
+ o1 = double_int_neg (o1);
	    /* Fall through.  */
case PLUS:
- add_double (l1, h1, l2, h2, &lv, &hv);
+ res = double_int_add (o0, o1);
break;
case MULT:
- mul_double (l1, h1, l2, h2, &lv, &hv);
+ res = double_int_mul (o0, o1);
break;
case DIV:
- if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
- &lv, &hv, <, &ht))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 0,
+ o0.low, o0.high, o1.low, o1.high,
+ &res.low, &res.high,
+ &tmp.low, &tmp.high))
return 0;
break;
case MOD:
- if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
- <, &ht, &lv, &hv))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 0,
+ o0.low, o0.high, o1.low, o1.high,
+ &tmp.low, &tmp.high,
+ &res.low, &res.high))
return 0;
break;
case UDIV:
- if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
- &lv, &hv, <, &ht))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 1,
+ o0.low, o0.high, o1.low, o1.high,
+ &res.low, &res.high,
+ &tmp.low, &tmp.high))
return 0;
break;
case UMOD:
- if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
- <, &ht, &lv, &hv))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 1,
+ o0.low, o0.high, o1.low, o1.high,
+ &tmp.low, &tmp.high,
+ &res.low, &res.high))
return 0;
break;
case AND:
- lv = l1 & l2, hv = h1 & h2;
+ res = double_int_and (o0, o1);
break;
case IOR:
- lv = l1 | l2, hv = h1 | h2;
+ res = double_int_ior (o0, o1);
break;
case XOR:
- lv = l1 ^ l2, hv = h1 ^ h2;
+ res = double_int_xor (o0, o1);
break;
case SMIN:
- if (h1 < h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- < (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_smin (o0, o1);
break;
case SMAX:
- if (h1 > h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- > (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_smax (o0, o1);
break;
case UMIN:
- if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- < (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_umin (o0, o1);
break;
case UMAX:
- if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- > (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_umax (o0, o1);
break;
case LSHIFTRT: case ASHIFTRT:
case ASHIFT:
case ROTATE: case ROTATERT:
- if (SHIFT_COUNT_TRUNCATED)
- l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
-
- if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
- return 0;
+ {
+ unsigned HOST_WIDE_INT cnt;
- if (code == LSHIFTRT || code == ASHIFTRT)
- rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
- code == ASHIFTRT);
- else if (code == ASHIFT)
- lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
- else if (code == ROTATE)
- lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
- else /* code == ROTATERT */
- rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
+ if (SHIFT_COUNT_TRUNCATED)
+ {
+ o1.high = 0;
+ o1.low &= GET_MODE_PRECISION (mode) - 1;
+ }
+
+ if (!double_int_fits_in_uhwi_p (o1)
+ || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
+ return 0;
+
+ cnt = double_int_to_uhwi (o1);
+
+ if (code == LSHIFTRT || code == ASHIFTRT)
+ res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
+ code == ASHIFTRT);
+ else if (code == ASHIFT)
+ res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
+ true);
+ else if (code == ROTATE)
+ res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
+ else /* code == ROTATERT */
+ res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
+ }
break;
default:
return 0;
}
- return immed_double_const (lv, hv, mode);
+ return immed_double_int_const (res, mode);
}
- if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
+ if (CONST_INT_P (op0) && CONST_INT_P (op1)
&& width <= HOST_BITS_PER_WIDE_INT && width != 0)
{
/* Get the integer argument values in two forms:
if (width < HOST_BITS_PER_WIDE_INT)
{
- arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
- arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
+ arg0 &= GET_MODE_MASK (mode);
+ arg1 &= GET_MODE_MASK (mode);
arg0s = arg0;
- if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
- arg0s |= ((HOST_WIDE_INT) (-1) << width);
+ if (val_signbit_known_set_p (mode, arg0s))
+ arg0s |= ~GET_MODE_MASK (mode);
- arg1s = arg1;
- if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
- arg1s |= ((HOST_WIDE_INT) (-1) << width);
+ arg1s = arg1;
+ if (val_signbit_known_set_p (mode, arg1s))
+ arg1s |= ~GET_MODE_MASK (mode);
}
else
{
arg0s = arg0;
arg1s = arg1;
}
-
+
/* Compute the value of the arithmetic. */
-
+
switch (code)
{
case PLUS:
val = arg0s + arg1s;
break;
-
+
case MINUS:
val = arg0s - arg1s;
break;
-
+
case MULT:
val = arg0s * arg1s;
break;
-
+
case DIV:
if (arg1s == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = arg0s / arg1s;
break;
-
+
case MOD:
if (arg1s == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = arg0s % arg1s;
break;
-
+
case UDIV:
if (arg1 == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = (unsigned HOST_WIDE_INT) arg0 / arg1;
break;
-
+
case UMOD:
if (arg1 == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = (unsigned HOST_WIDE_INT) arg0 % arg1;
break;
-
+
case AND:
val = arg0 & arg1;
break;
-
+
case IOR:
val = arg0 | arg1;
break;
-
+
case XOR:
val = arg0 ^ arg1;
break;
-
+
case LSHIFTRT:
case ASHIFT:
case ASHIFTRT:
arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
return 0;
-
+
val = (code == ASHIFT
? ((unsigned HOST_WIDE_INT) arg0) << arg1
: ((unsigned HOST_WIDE_INT) arg0) >> arg1);
-
+
/* Sign-extend the result for arithmetic right shifts. */
if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
- val |= ((HOST_WIDE_INT) -1) << (width - arg1);
+ val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
break;
-
+
case ROTATERT:
if (arg1 < 0)
return 0;
-
+
arg1 %= width;
val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
| (((unsigned HOST_WIDE_INT) arg0) >> arg1));
break;
-
+
case ROTATE:
if (arg1 < 0)
return 0;
-
+
arg1 %= width;
val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
| (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
break;
-
+
case COMPARE:
/* Do nothing here. */
return 0;
-
+
case SMIN:
val = arg0s <= arg1s ? arg0s : arg1s;
break;
-
+
case UMIN:
val = ((unsigned HOST_WIDE_INT) arg0
<= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
break;
-
+
case SMAX:
val = arg0s > arg1s ? arg0s : arg1s;
break;
-
+
case UMAX:
val = ((unsigned HOST_WIDE_INT) arg0
> (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
break;
-
+
case SS_PLUS:
case US_PLUS:
case SS_MINUS:
case US_MINUS:
+ case SS_MULT:
+ case US_MULT:
+ case SS_DIV:
+ case US_DIV:
case SS_ASHIFT:
+ case US_ASHIFT:
/* ??? There are simplifications that can be done. */
return 0;
-
+
default:
gcc_unreachable ();
}
short neg;
};
-static int
-simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
+static bool
+simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
- const struct simplify_plus_minus_op_data *d1 = p1;
- const struct simplify_plus_minus_op_data *d2 = p2;
int result;
- result = (commutative_operand_precedence (d2->op)
- - commutative_operand_precedence (d1->op));
+ result = (commutative_operand_precedence (y)
+ - commutative_operand_precedence (x));
if (result)
- return result;
+ return result > 0;
/* Group together equal REGs to do more simplification. */
- if (REG_P (d1->op) && REG_P (d2->op))
- return REGNO (d1->op) - REGNO (d2->op);
+ if (REG_P (x) && REG_P (y))
+ return REGNO (x) > REGNO (y);
else
- return 0;
+ return false;
}
static rtx
/* ~a -> (-a - 1) */
if (n_ops != 7)
{
- ops[n_ops].op = constm1_rtx;
+ ops[n_ops].op = CONSTM1_RTX (mode);
ops[n_ops++].neg = this_neg;
ops[i].op = XEXP (this_op, 0);
ops[i].neg = !this_neg;
{
struct simplify_plus_minus_op_data save;
j = i - 1;
- if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
+ if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
continue;
canonicalized = 1;
save = ops[i];
do
ops[j + 1] = ops[j];
- while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
+ while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
ops[j + 1] = save;
}
- /* This is only useful the first time through. */
- if (!canonicalized)
- return NULL_RTX;
-
changed = 0;
for (i = n_ops - 1; i > 0; i--)
for (j = i - 1; j >= 0; j--)
else if (swap_commutative_operands_p (lhs, rhs))
tem = lhs, lhs = rhs, rhs = tem;
- if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
- && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
+ if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
+ && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
{
rtx tem_lhs, tem_rhs;
}
else
tem = simplify_binary_operation (ncode, mode, lhs, rhs);
-
+
/* Reject "simplifications" that just wrap the two
arguments in a CONST. Failure to do so can result
in infinite recursion with simplify_binary_operation
lneg &= rneg;
if (GET_CODE (tem) == NEG)
tem = XEXP (tem, 0), lneg = !lneg;
- if (GET_CODE (tem) == CONST_INT && lneg)
+ if (CONST_INT_P (tem) && lneg)
tem = neg_const_int (mode, tem), lneg = 0;
ops[i].op = tem;
ops[i].neg = lneg;
ops[j].op = NULL_RTX;
changed = 1;
+ canonicalized = 1;
}
}
}
+ /* If nothing changed, fail. */
+ if (!canonicalized)
+ return NULL_RTX;
+
/* Pack all the operands to the lower-numbered entries. */
for (i = 0, j = 0; j < n_ops; j++)
if (ops[j].op)
/* Create (minus -C X) instead of (neg (const (plus X C))). */
if (n_ops == 2
- && GET_CODE (ops[1].op) == CONST_INT
+ && CONST_INT_P (ops[1].op)
&& CONSTANT_P (ops[0].op)
&& ops[0].neg)
return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
-
+
/* We suppressed creation of trivial CONST expressions in the
combination loop to avoid recursion. Create one manually now.
The combination loop should have ensured that there is exactly
in the array and that any other constant will be next-to-last. */
if (n_ops > 1
- && GET_CODE (ops[n_ops - 1].op) == CONST_INT
+ && CONST_INT_P (ops[n_ops - 1].op)
&& CONSTANT_P (ops[n_ops - 2].op))
{
rtx value = ops[n_ops - 1].op;
if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
value = neg_const_int (mode, value);
- ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
+ ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
+ INTVAL (value));
n_ops--;
}
/* Check whether an operand is suitable for calling simplify_plus_minus. */
static bool
-plus_minus_operand_p (rtx x)
+plus_minus_operand_p (const_rtx x)
{
return GET_CODE (x) == PLUS
|| GET_CODE (x) == MINUS
}
#else
return NULL_RTX;
-#endif
+#endif
}
if (VECTOR_MODE_P (mode))
{
/* If op0 is a compare, extract the comparison arguments from it. */
if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
- return simplify_relational_operation (code, mode, VOIDmode,
- XEXP (op0, 0), XEXP (op0, 1));
+ return simplify_gen_relational (code, mode, VOIDmode,
+ XEXP (op0, 0), XEXP (op0, 1));
if (GET_MODE_CLASS (cmp_mode) == MODE_CC
|| CC0_P (op0))
}
}
+ /* (LTU/GEU (PLUS a C) C), where C is a constant, can be simplified to
+ (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
+ if ((code == LTU || code == GEU)
+ && GET_CODE (op0) == PLUS
+ && CONST_INT_P (XEXP (op0, 1))
+ && (rtx_equal_p (op1, XEXP (op0, 0))
+ || rtx_equal_p (op1, XEXP (op0, 1))))
+ {
+ rtx new_cmp
+ = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
+ return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
+ cmp_mode, XEXP (op0, 0), new_cmp);
+ }
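+ /* Worked example (illustrative): in SImode, (ltu (plus a 1) 1) holds
+ iff the addition wrapped, i.e. iff a == 0xffffffff, which is exactly
+ (geu a -1). */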
+
+ /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
+ if ((code == LTU || code == GEU)
+ && GET_CODE (op0) == PLUS
+ && rtx_equal_p (op1, XEXP (op0, 1))
+ /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
+ && !rtx_equal_p (op1, XEXP (op0, 0)))
+ return simplify_gen_relational (code, mode, cmp_mode, op0,
+ copy_rtx (XEXP (op0, 0)));
+
if (op1 == const0_rtx)
{
/* Canonicalize (GTU x 0) as (NE x 0). */
{
rtx x = XEXP (op0, 0);
rtx c = XEXP (op0, 1);
+ enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
+ rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
+
+ /* Detect an infinitely recursive condition, where we oscillate in
+ this simplification case between:
+ A + B == C <---> C - B == A,
+ where A, B, and C are all constant expressions that cannot be
+ simplified further, usually SYMBOL_REFs. */
+ if (GET_CODE (tem) == invcode
+ && CONSTANT_P (x)
+ && rtx_equal_p (c, XEXP (tem, 1)))
+ return NULL_RTX;
- c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
- cmp_mode, op1, c);
- return simplify_gen_relational (code, mode, cmp_mode, x, c);
+ return simplify_gen_relational (code, mode, cmp_mode, x, tem);
}
/* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
/* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
if ((code == EQ || code == NE)
&& op0code == XOR
- && (GET_CODE (op1) == CONST_INT
+ && (CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE)
- && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (CONST_INT_P (XEXP (op0, 1))
|| GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
simplify_gen_binary (XOR, cmp_mode,
return NULL_RTX;
}
+enum
+{
+ CMP_EQ = 1,
+ CMP_LT = 2,
+ CMP_GT = 4,
+ CMP_LTU = 8,
+ CMP_GTU = 16
+};
+
+
+/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
+ KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
+ For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
+ logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
+ For floating-point comparisons, assume that the operands were ordered. */
+
+static rtx
+comparison_result (enum rtx_code code, int known_results)
+{
+ switch (code)
+ {
+ case EQ:
+ case UNEQ:
+ return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
+ case NE:
+ case LTGT:
+ return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
+
+ case LT:
+ case UNLT:
+ return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
+ case GE:
+ case UNGE:
+ return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
+
+ case GT:
+ case UNGT:
+ return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
+ case LE:
+ case UNLE:
+ return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
+
+ case LTU:
+ return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
+ case GEU:
+ return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
+
+ case GTU:
+ return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
+ case LEU:
+ return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
+
+ case ORDERED:
+ return const_true_rtx;
+ case UNORDERED:
+ return const0_rtx;
+ default:
+ gcc_unreachable ();
+ }
+}
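+/* For instance, KNOWN_RESULTS == (CMP_LT | CMP_GTU) describes operands
+ that compare less-than as signed values but greater-than as unsigned
+ values (op0 negative, op1 non-negative); comparison_result (GEU, ...)
+ then yields const_true_rtx. */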
+
/* Check if the given comparison (done in the given MODE) is actually a
tautology or a contradiction.
If no simplification is possible, this function returns zero.
enum machine_mode mode,
rtx op0, rtx op1)
{
- int equal, op0lt, op0ltu, op1lt, op1ltu;
rtx tem;
rtx trueop0;
rtx trueop1;
if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
&& (code == EQ || code == NE)
- && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
- && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
+ && ! ((REG_P (op0) || CONST_INT_P (trueop0))
+ && (REG_P (op1) || CONST_INT_P (trueop1)))
&& 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
/* We cannot do this if tem is a nonzero address. */
&& ! nonzero_address_p (tem))
return const0_rtx;
/* For modes without NaNs, if the two operands are equal, we know the
- result except if they have side-effects. */
- if (! HONOR_NANS (GET_MODE (trueop0))
+ result except if they have side-effects. Even with NaNs we know
+ the result of unordered comparisons and, if signaling NaNs are
+ irrelevant, also the result of LT/GT/LTGT. */
+ if ((! HONOR_NANS (GET_MODE (trueop0))
+ || code == UNEQ || code == UNLE || code == UNGE
+ || ((code == LT || code == GT || code == LTGT)
+ && ! HONOR_SNANS (GET_MODE (trueop0))))
&& rtx_equal_p (trueop0, trueop1)
&& ! side_effects_p (trueop0))
- equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
+ return comparison_result (code, CMP_EQ);
/* If the operands are floating-point constants, see if we can fold
the result. */
- else if (GET_CODE (trueop0) == CONST_DOUBLE
- && GET_CODE (trueop1) == CONST_DOUBLE
- && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
+ if (GET_CODE (trueop0) == CONST_DOUBLE
+ && GET_CODE (trueop1) == CONST_DOUBLE
+ && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
{
REAL_VALUE_TYPE d0, d1;
return 0;
}
- equal = REAL_VALUES_EQUAL (d0, d1);
- op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
- op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
+ return comparison_result (code,
+ (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
+ REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
}
/* Otherwise, see if the operands are both integers. */
- else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
- && (GET_CODE (trueop0) == CONST_DOUBLE
- || GET_CODE (trueop0) == CONST_INT)
- && (GET_CODE (trueop1) == CONST_DOUBLE
- || GET_CODE (trueop1) == CONST_INT))
+ if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
+ && (GET_CODE (trueop0) == CONST_DOUBLE
+ || CONST_INT_P (trueop0))
+ && (GET_CODE (trueop1) == CONST_DOUBLE
+ || CONST_INT_P (trueop1)))
{
- int width = GET_MODE_BITSIZE (mode);
+ int width = GET_MODE_PRECISION (mode);
HOST_WIDE_INT l0s, h0s, l1s, h1s;
unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
we have to sign or zero-extend the values. */
if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
{
- l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
- l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
+ l0u &= GET_MODE_MASK (mode);
+ l1u &= GET_MODE_MASK (mode);
- if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
- l0s |= ((HOST_WIDE_INT) (-1) << width);
+ if (val_signbit_known_set_p (mode, l0s))
+ l0s |= ~GET_MODE_MASK (mode);
- if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
- l1s |= ((HOST_WIDE_INT) (-1) << width);
+ if (val_signbit_known_set_p (mode, l1s))
+ l1s |= ~GET_MODE_MASK (mode);
}
if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
- equal = (h0u == h1u && l0u == l1u);
- op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
- op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
- op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
- op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
+ if (h0u == h1u && l0u == l1u)
+ return comparison_result (code, CMP_EQ);
+ else
+ {
+ int cr;
+ cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
+ cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
+ return comparison_result (code, cr);
+ }
}
- /* Otherwise, there are some code-specific tests we can make. */
- else
+ /* Optimize comparisons with upper and lower bounds. */
+ if (HWI_COMPUTABLE_MODE_P (mode)
+ && CONST_INT_P (trueop1))
{
- /* Optimize comparisons with upper and lower bounds. */
- if (SCALAR_INT_MODE_P (mode)
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
- {
- rtx mmin, mmax;
- int sign;
-
- if (code == GEU
- || code == LEU
- || code == GTU
- || code == LTU)
- sign = 0;
- else
- sign = 1;
+ int sign;
+ unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
+ HOST_WIDE_INT val = INTVAL (trueop1);
+ HOST_WIDE_INT mmin, mmax;
+
+ if (code == GEU
+ || code == LEU
+ || code == GTU
+ || code == LTU)
+ sign = 0;
+ else
+ sign = 1;
- get_mode_bounds (mode, sign, mode, &mmin, &mmax);
+ /* Get a reduced range if the sign bit is zero. */
+ if (nonzero <= (GET_MODE_MASK (mode) >> 1))
+ {
+ mmin = 0;
+ mmax = nonzero;
+ }
+ else
+ {
+ rtx mmin_rtx, mmax_rtx;
+ get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
- tem = NULL_RTX;
- switch (code)
+ mmin = INTVAL (mmin_rtx);
+ mmax = INTVAL (mmax_rtx);
+ if (sign)
{
- case GEU:
- case GE:
- /* x >= min is always true. */
- if (rtx_equal_p (trueop1, mmin))
- tem = const_true_rtx;
- else
- break;
-
- case LEU:
- case LE:
- /* x <= max is always true. */
- if (rtx_equal_p (trueop1, mmax))
- tem = const_true_rtx;
- break;
-
- case GTU:
- case GT:
- /* x > max is always false. */
- if (rtx_equal_p (trueop1, mmax))
- tem = const0_rtx;
- break;
+ unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
- case LTU:
- case LT:
- /* x < min is always false. */
- if (rtx_equal_p (trueop1, mmin))
- tem = const0_rtx;
- break;
-
- default:
- break;
+ mmin >>= (sign_copies - 1);
+ mmax >>= (sign_copies - 1);
}
- if (tem == const0_rtx
- || tem == const_true_rtx)
- return tem;
}
switch (code)
{
+ /* x >= y is always true for y <= mmin, always false for y > mmax. */
+ case GEU:
+ if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
+ return const_true_rtx;
+ if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
+ return const0_rtx;
+ break;
+ case GE:
+ if (val <= mmin)
+ return const_true_rtx;
+ if (val > mmax)
+ return const0_rtx;
+ break;
+
+ /* x <= y is always true for y >= mmax, always false for y < mmin. */
+ case LEU:
+ if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
+ return const_true_rtx;
+ if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
+ return const0_rtx;
+ break;
+ case LE:
+ if (val >= mmax)
+ return const_true_rtx;
+ if (val < mmin)
+ return const0_rtx;
+ break;
+
case EQ:
- if (trueop1 == const0_rtx && nonzero_address_p (op0))
+ /* x == y is always false for y out of range. */
+ if (val < mmin || val > mmax)
return const0_rtx;
break;
- case NE:
- if (trueop1 == const0_rtx && nonzero_address_p (op0))
+ /* x > y is always false for y >= mmax, always true for y < mmin. */
+ case GTU:
+ if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
+ return const0_rtx;
+ if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
+ return const_true_rtx;
+ break;
+ case GT:
+ if (val >= mmax)
+ return const0_rtx;
+ if (val < mmin)
return const_true_rtx;
break;
+ /* x < y is always false for y <= mmin, always true for y > mmax. */
+ case LTU:
+ if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
+ return const0_rtx;
+ if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
+ return const_true_rtx;
+ break;
case LT:
- /* Optimize abs(x) < 0.0. */
- if (trueop1 == CONST0_RTX (mode)
- && !HONOR_SNANS (mode)
- && (!INTEGRAL_MODE_P (mode)
- || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
+ if (val <= mmin)
+ return const0_rtx;
+ if (val > mmax)
+ return const_true_rtx;
+ break;
+
+ case NE:
+ /* x != y is always true for y out of range. */
+ if (val < mmin || val > mmax)
+ return const_true_rtx;
+ break;
+
+ default:
+ break;
+ }
+ }
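+ /* Worked example for the bounds checks above (illustrative): if
+ nonzero_bits shows TRUEOP0 fits in 8 bits, then mmin == 0 and
+ mmax == 0xff, so (gtu x 0xff) folds to const0_rtx and (leu x 0xff)
+ to const_true_rtx. */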
+
+ /* Optimize integer comparisons with zero. */
+ if (trueop1 == const0_rtx)
+ {
+ /* Some addresses are known to be nonzero. We don't know
+ their sign, but equality comparisons are known. */
+ if (nonzero_address_p (trueop0))
+ {
+ if (code == EQ || code == LEU)
+ return const0_rtx;
+ if (code == NE || code == GTU)
+ return const_true_rtx;
+ }
+
+ /* See if the first operand is an IOR with a constant. If so, we
+ may be able to determine the result of this comparison. */
+ if (GET_CODE (op0) == IOR)
+ {
+ rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
+ if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
{
- tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
- : trueop0;
- if (GET_CODE (tem) == ABS)
+ int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
+ int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
+ && (UINTVAL (inner_const)
+ & ((unsigned HOST_WIDE_INT) 1
+ << sign_bitnum)));
+
+ switch (code)
{
- if (INTEGRAL_MODE_P (mode)
- && (issue_strict_overflow_warning
- (WARN_STRICT_OVERFLOW_CONDITIONAL)))
- warning (OPT_Wstrict_overflow,
- ("assuming signed overflow does not occur when "
- "assuming abs (x) < 0 is false"));
+ case EQ:
+ case LEU:
return const0_rtx;
+ case NE:
+ case GTU:
+ return const_true_rtx;
+ case LT:
+ case LE:
+ if (has_sign)
+ return const_true_rtx;
+ break;
+ case GT:
+ case GE:
+ if (has_sign)
+ return const0_rtx;
+ break;
+ default:
+ break;
}
}
+ }
+ }
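+
+ /* For example, the IOR check above folds (ne (ior x 4) 0) to
+ const_true_rtx, since bit 2 of the IOR result is known set. */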
- /* Optimize popcount (x) < 0. */
- if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
- return const_true_rtx;
+ /* Optimize comparison of ABS with zero. */
+ if (trueop1 == CONST0_RTX (mode)
+ && (GET_CODE (trueop0) == ABS
+ || (GET_CODE (trueop0) == FLOAT_EXTEND
+ && GET_CODE (XEXP (trueop0, 0)) == ABS)))
+ {
+ switch (code)
+ {
+ case LT:
+ /* Optimize abs(x) < 0.0. */
+ if (!HONOR_SNANS (mode)
+ && (!INTEGRAL_MODE_P (mode)
+ || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
+ {
+ if (INTEGRAL_MODE_P (mode)
+ && (issue_strict_overflow_warning
+ (WARN_STRICT_OVERFLOW_CONDITIONAL)))
+ warning (OPT_Wstrict_overflow,
+ ("assuming signed overflow does not occur when "
+ "assuming abs (x) < 0 is false"));
+ return const0_rtx;
+ }
break;
case GE:
/* Optimize abs(x) >= 0.0. */
- if (trueop1 == CONST0_RTX (mode)
- && !HONOR_NANS (mode)
+ if (!HONOR_NANS (mode)
&& (!INTEGRAL_MODE_P (mode)
|| (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
{
- tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
- : trueop0;
- if (GET_CODE (tem) == ABS)
- {
- if (INTEGRAL_MODE_P (mode)
- && (issue_strict_overflow_warning
- (WARN_STRICT_OVERFLOW_CONDITIONAL)))
- warning (OPT_Wstrict_overflow,
- ("assuming signed overflow does not occur when "
- "assuming abs (x) >= 0 is true"));
- return const_true_rtx;
- }
+ if (INTEGRAL_MODE_P (mode)
+ && (issue_strict_overflow_warning
+ (WARN_STRICT_OVERFLOW_CONDITIONAL)))
+ warning (OPT_Wstrict_overflow,
+ ("assuming signed overflow does not occur when "
+ "assuming abs (x) >= 0 is true"));
+ return const_true_rtx;
}
-
- /* Optimize popcount (x) >= 0. */
- if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
- return const_true_rtx;
break;
case UNGE:
/* Optimize ! (abs(x) < 0.0). */
- if (trueop1 == CONST0_RTX (mode))
- {
- tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
- : trueop0;
- if (GET_CODE (tem) == ABS)
- return const_true_rtx;
- }
- break;
+ return const_true_rtx;
default:
break;
}
-
- return 0;
}
- /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
- as appropriate. */
- switch (code)
- {
- case EQ:
- case UNEQ:
- return equal ? const_true_rtx : const0_rtx;
- case NE:
- case LTGT:
- return ! equal ? const_true_rtx : const0_rtx;
- case LT:
- case UNLT:
- return op0lt ? const_true_rtx : const0_rtx;
- case GT:
- case UNGT:
- return op1lt ? const_true_rtx : const0_rtx;
- case LTU:
- return op0ltu ? const_true_rtx : const0_rtx;
- case GTU:
- return op1ltu ? const_true_rtx : const0_rtx;
- case LE:
- case UNLE:
- return equal || op0lt ? const_true_rtx : const0_rtx;
- case GE:
- case UNGE:
- return equal || op1lt ? const_true_rtx : const0_rtx;
- case LEU:
- return equal || op0ltu ? const_true_rtx : const0_rtx;
- case GEU:
- return equal || op1ltu ? const_true_rtx : const0_rtx;
- case ORDERED:
- return const_true_rtx;
- case UNORDERED:
- return const0_rtx;
- default:
- gcc_unreachable ();
- }
+ return 0;
}
\f
/* Simplify CODE, an operation with result mode MODE and three operands,
enum machine_mode op0_mode, rtx op0, rtx op1,
rtx op2)
{
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
+ bool any_change = false;
+ rtx tem;
/* VOIDmode means "infinite" precision. */
if (width == 0)
switch (code)
{
+ case FMA:
+ /* Simplify negations around the multiplication. */
+ /* -a * -b + c => a * b + c. */
+ if (GET_CODE (op0) == NEG)
+ {
+ tem = simplify_unary_operation (NEG, mode, op1, mode);
+ if (tem)
+ op1 = tem, op0 = XEXP (op0, 0), any_change = true;
+ }
+ else if (GET_CODE (op1) == NEG)
+ {
+ tem = simplify_unary_operation (NEG, mode, op0, mode);
+ if (tem)
+ op0 = tem, op1 = XEXP (op1, 0), any_change = true;
+ }
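+ /* E.g. (fma (neg a) (neg b) c): one branch above moves the negation
+ across the multiplication, where it cancels, giving (fma a b c). */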
+
+ /* Canonicalize the two multiplication operands. */
+ /* a * -b + c => -b * a + c. */
+ if (swap_commutative_operands_p (op0, op1))
+ tem = op0, op0 = op1, op1 = tem, any_change = true;
+
+ if (any_change)
+ return gen_rtx_FMA (mode, op0, op1, op2);
+ return NULL_RTX;
+
case SIGN_EXTRACT:
case ZERO_EXTRACT:
- if (GET_CODE (op0) == CONST_INT
- && GET_CODE (op1) == CONST_INT
- && GET_CODE (op2) == CONST_INT
+ if (CONST_INT_P (op0)
+ && CONST_INT_P (op1)
+ && CONST_INT_P (op2)
&& ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
&& width <= (unsigned) HOST_BITS_PER_WIDE_INT)
{
/* Extracting a bit-field from a constant */
- HOST_WIDE_INT val = INTVAL (op0);
-
+ unsigned HOST_WIDE_INT val = UINTVAL (op0);
+ HOST_WIDE_INT op1val = INTVAL (op1);
+ HOST_WIDE_INT op2val = INTVAL (op2);
if (BITS_BIG_ENDIAN)
- val >>= (GET_MODE_BITSIZE (op0_mode)
- - INTVAL (op2) - INTVAL (op1));
+ val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
else
- val >>= INTVAL (op2);
+ val >>= op2val;
- if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
+ if (HOST_BITS_PER_WIDE_INT != op1val)
{
/* First zero-extend. */
- val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
+ val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
/* If desired, propagate sign bit. */
if (code == SIGN_EXTRACT
- && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
- val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
+ && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
+ != 0)
+ val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
}
- /* Clear the bits that don't belong in our mode,
- unless they and our sign bit are all one.
- So we get either a reasonable negative value or a reasonable
- unsigned value for this mode. */
- if (width < HOST_BITS_PER_WIDE_INT
- && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
- != ((HOST_WIDE_INT) (-1) << (width - 1))))
- val &= ((HOST_WIDE_INT) 1 << width) - 1;
-
return gen_int_mode (val, mode);
}
break;
case IF_THEN_ELSE:
- if (GET_CODE (op0) == CONST_INT)
+ if (CONST_INT_P (op0))
return op0 != const0_rtx ? op1 : op2;
/* Convert c ? a : a into "a". */
rtx temp;
/* Look for happy constants in op1 and op2. */
- if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
+ if (CONST_INT_P (op1) && CONST_INT_P (op2))
{
HOST_WIDE_INT t = INTVAL (op1);
HOST_WIDE_INT f = INTVAL (op2);
/* See if any simplifications were possible. */
if (temp)
{
- if (GET_CODE (temp) == CONST_INT)
+ if (CONST_INT_P (temp))
return temp == const0_rtx ? op2 : op1;
else if (temp)
return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
gcc_assert (GET_MODE (op1) == mode);
gcc_assert (VECTOR_MODE_P (mode));
op2 = avoid_constant_pool_reference (op2);
- if (GET_CODE (op2) == CONST_INT)
+ if (CONST_INT_P (op2))
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
return 0;
}
-/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
- returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
+/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
+ or CONST_VECTOR,
+ returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
Works by unpacking OP into a collection of 8-bit values
represented as a little-endian array of 'unsigned char', selecting by BYTE,
and then repacking them again for OUTERMODE. */
static rtx
-simplify_immed_subreg (enum machine_mode outermode, rtx op,
+simplify_immed_subreg (enum machine_mode outermode, rtx op,
enum machine_mode innermode, unsigned int byte)
{
/* We support up to 512-bit values (for V8DFmode). */
enum machine_mode outer_submode;
/* Some ports misuse CCmode. */
- if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
+ if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
return op;
/* We have no way to represent a complex constant at the rtl level. */
gcc_assert (BITS_PER_UNIT % value_bit == 0);
/* I don't know how to handle endianness of sub-units. */
gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
-
+
for (elem = 0; elem < num_elem; elem++)
{
unsigned char * vp;
rtx el = elems[elem];
-
+
/* Vectors are kept in target memory order. (This is probably
a mistake.) */
{
unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
- unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
+ unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
/ BITS_PER_UNIT);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
+ (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
vp = value + (bytele * BITS_PER_UNIT) / value_bit;
}
-
+
switch (GET_CODE (el))
{
case CONST_INT:
for (i = 0;
- i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
+ i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
i += value_bit)
*vp++ = INTVAL (el) >> i;
/* CONST_INTs are always logically sign-extended. */
for (; i < elem_bitsize; i += value_bit)
*vp++ = INTVAL (el) < 0 ? -1 : 0;
break;
-
+
case CONST_DOUBLE:
if (GET_MODE (el) == VOIDmode)
{
+ unsigned char extend = 0;
/* If this triggers, someone should have generated a
CONST_INT instead. */
gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
*vp++ = CONST_DOUBLE_LOW (el) >> i;
- while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
+ while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
{
*vp++
= CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
i += value_bit;
}
- /* It shouldn't matter what's done here, so fill it with
- zero. */
+
+ if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
+ extend = -1;
for (; i < elem_bitsize; i += value_bit)
- *vp++ = 0;
+ *vp++ = extend;
}
else
{
ibase = i;
*vp++ = tmp[ibase / 32] >> i % 32;
}
-
+
/* It shouldn't matter what's done here, so fill it with
zero. */
for (; i < elem_bitsize; i += value_bit)
*vp++ = 0;
}
break;
-
+
+ case CONST_FIXED:
+ if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
+ {
+ for (i = 0; i < elem_bitsize; i += value_bit)
+ *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
+ }
+ else
+ {
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
+ *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
+ for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
+ i += value_bit)
+ *vp++ = CONST_FIXED_VALUE_HIGH (el)
+ >> (i - HOST_BITS_PER_WIDE_INT);
+ for (; i < elem_bitsize; i += value_bit)
+ *vp++ = 0;
+ }
+ break;
+
default:
gcc_unreachable ();
}
will already have offset 0. */
if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
{
- unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
+ unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
- byte);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
value_start = byte * (BITS_PER_UNIT / value_bit);
/* Re-pack the value. */
-
+
if (VECTOR_MODE_P (outermode))
{
num_elem = GET_MODE_NUNITS (outermode);
for (elem = 0; elem < num_elem; elem++)
{
unsigned char *vp;
-
+
/* Vectors are stored in target memory order. (This is probably
a mistake.) */
{
unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
- unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
+ unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
/ BITS_PER_UNIT);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
for (i = 0;
i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
i += value_bit)
- lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
+ lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
for (; i < elem_bitsize; i += value_bit)
- hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
- << (i - HOST_BITS_PER_WIDE_INT));
-
+ hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
+ << (i - HOST_BITS_PER_WIDE_INT);
+
/* immed_double_const doesn't call trunc_int_for_mode. I don't
know why. */
if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
elems[elem] = gen_int_mode (lo, outer_submode);
- else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
+ else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
elems[elem] = immed_double_const (lo, hi, outer_submode);
else
return NULL_RTX;
}
break;
-
+
case MODE_FLOAT:
case MODE_DECIMAL_FLOAT:
{
REAL_VALUE_TYPE r;
long tmp[max_bitsize / 32];
-
+
/* real_from_target wants its input in words affected by
FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
and use WORDS_BIG_ENDIAN instead; see the documentation
elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
}
break;
-
+
+ case MODE_FRACT:
+ case MODE_UFRACT:
+ case MODE_ACCUM:
+ case MODE_UACCUM:
+ {
+ FIXED_VALUE_TYPE f;
+ f.data.low = 0;
+ f.data.high = 0;
+ f.mode = outer_submode;
+
+ for (i = 0;
+ i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
+ i += value_bit)
+ f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
+ for (; i < elem_bitsize; i += value_bit)
+ f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
+ << (i - HOST_BITS_PER_WIDE_INT));
+
+ elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
+ }
+ break;
+
default:
gcc_unreachable ();
}
if (outermode == innermode && !byte)
return op;
- if (GET_CODE (op) == CONST_INT
+ if (CONST_INT_P (op)
|| GET_CODE (op) == CONST_DOUBLE
+ || GET_CODE (op) == CONST_FIXED
|| GET_CODE (op) == CONST_VECTOR)
return simplify_immed_subreg (outermode, op, innermode, byte);
return newx;
if (validate_subreg (outermode, innermostmode,
SUBREG_REG (op), final_offset))
- return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
+ {
+ newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
+ if (SUBREG_PROMOTED_VAR_P (op)
+ && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
+ && GET_MODE_CLASS (outermode) == MODE_INT
+ && IN_RANGE (GET_MODE_SIZE (outermode),
+ GET_MODE_SIZE (innermode),
+ GET_MODE_SIZE (innermostmode))
+ && subreg_lowpart_p (newx))
+ {
+ SUBREG_PROMOTED_VAR_P (newx) = 1;
+ SUBREG_PROMOTED_UNSIGNED_SET
+ (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
+ }
+ return newx;
+ }
return NULL_RTX;
}
suppress this simplification. If the hard register is the stack,
frame, or argument pointer, leave this as a SUBREG. */
- if (REG_P (op)
- && REGNO (op) < FIRST_PSEUDO_REGISTER
-#ifdef CANNOT_CHANGE_MODE_CLASS
- && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
- && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
- && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
-#endif
- && ((reload_completed && !frame_pointer_needed)
- || (REGNO (op) != FRAME_POINTER_REGNUM
-#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
- && REGNO (op) != HARD_FRAME_POINTER_REGNUM
-#endif
- ))
-#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
- && REGNO (op) != ARG_POINTER_REGNUM
-#endif
- && REGNO (op) != STACK_POINTER_REGNUM
- && subreg_offset_representable_p (REGNO (op), innermode,
- byte, outermode))
+ if (REG_P (op) && HARD_REGISTER_P (op))
{
- unsigned int regno = REGNO (op);
- unsigned int final_regno
- = regno + subreg_regno_offset (regno, innermode, byte, outermode);
-
- /* ??? We do allow it if the current REG is not valid for
- its mode. This is a kludge to work around how float/complex
- arguments are passed on 32-bit SPARC and should be fixed. */
- if (HARD_REGNO_MODE_OK (final_regno, outermode)
- || ! HARD_REGNO_MODE_OK (regno, innermode))
+ unsigned int regno, final_regno;
+
+ regno = REGNO (op);
+ final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
+ if (HARD_REGISTER_NUM_P (final_regno))
{
rtx x;
int final_offset = byte;
/* Optimize SUBREG truncations of zero and sign extended values. */
if ((GET_CODE (op) == ZERO_EXTEND
|| GET_CODE (op) == SIGN_EXTEND)
- && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
+ && SCALAR_INT_MODE_P (innermode)
+ && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
{
unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
enum machine_mode origmode = GET_MODE (XEXP (op, 0));
if (outermode == origmode)
return XEXP (op, 0);
- if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
+ if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
subreg_lowpart_offset (outermode,
origmode));
/* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits. */
if (GET_CODE (op) == ZERO_EXTEND
- && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
+ && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
return CONST0_RTX (outermode);
}
if ((GET_CODE (op) == LSHIFTRT
|| GET_CODE (op) == ASHIFTRT)
&& SCALAR_INT_MODE_P (outermode)
+ && SCALAR_INT_MODE_P (innermode)
/* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
to avoid the possibility that an outer LSHIFTRT shifts by more
than the sign extension's sign_bit_copies and introduces zeros
into the high bits of the result. */
- && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
+ && CONST_INT_P (XEXP (op, 1))
&& GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
&& GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
&& subreg_lsb_1 (outermode, innermode, byte) == 0)
return simplify_gen_binary (ASHIFTRT, outermode,
XEXP (XEXP (op, 0), 0), XEXP (op, 1));
if ((GET_CODE (op) == LSHIFTRT
|| GET_CODE (op) == ASHIFTRT)
&& SCALAR_INT_MODE_P (outermode)
- && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && SCALAR_INT_MODE_P (innermode)
+ && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
+ && CONST_INT_P (XEXP (op, 1))
&& GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
&& GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
&& subreg_lsb_1 (outermode, innermode, byte) == 0)
return simplify_gen_binary (LSHIFTRT, outermode,
XEXP (XEXP (op, 0), 0), XEXP (op, 1));
the outer subreg is effectively a truncation to the original mode. */
if (GET_CODE (op) == ASHIFT
&& SCALAR_INT_MODE_P (outermode)
- && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && SCALAR_INT_MODE_P (innermode)
+ && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
+ && CONST_INT_P (XEXP (op, 1))
&& (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
|| GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
&& GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
&& subreg_lsb_1 (outermode, innermode, byte) == 0)
return simplify_gen_binary (ASHIFT, outermode,
XEXP (XEXP (op, 0), 0), XEXP (op, 1));
+ /* Recognize a word extraction from a multi-word subreg. */
+ if ((GET_CODE (op) == LSHIFTRT
+ || GET_CODE (op) == ASHIFTRT)
+ && SCALAR_INT_MODE_P (innermode)
+ && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
+ && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
+ && CONST_INT_P (XEXP (op, 1))
+ && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
+ && INTVAL (XEXP (op, 1)) >= 0
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
+ && byte == subreg_lowpart_offset (outermode, innermode))
+ {
+ int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
+ return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
+ (WORDS_BIG_ENDIAN
+ ? byte - shifted_bytes
+ : byte + shifted_bytes));
+ }
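+ /* Worked example (illustrative): on a 32-bit little-endian target,
+ (subreg:SI (lshiftrt:DI x (const_int 32)) 0) extracts the high word
+ of X and becomes (subreg:SI x 4). */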
+
+ /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
+ and try replacing the SUBREG and shift with it. Don't do this if
+ the MEM has a mode-dependent address or if we would be widening it. */
+
+ if ((GET_CODE (op) == LSHIFTRT
+ || GET_CODE (op) == ASHIFTRT)
+ && SCALAR_INT_MODE_P (innermode)
+ && MEM_P (XEXP (op, 0))
+ && CONST_INT_P (XEXP (op, 1))
+ && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
+ && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
+ && INTVAL (XEXP (op, 1)) > 0
+ && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
+ && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
+ && ! MEM_VOLATILE_P (XEXP (op, 0))
+ && byte == subreg_lowpart_offset (outermode, innermode)
+ && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
+ || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
+ {
+ int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
+ return adjust_address_nv (XEXP (op, 0), outermode,
+ (WORDS_BIG_ENDIAN
+ ? byte - shifted_bytes
+ : byte + shifted_bytes));
+ }
+
return NULL_RTX;
}
simplification and 1 for tree simplification. */
rtx
-simplify_rtx (rtx x)
+simplify_rtx (const_rtx x)
{
- enum rtx_code code = GET_CODE (x);
- enum machine_mode mode = GET_MODE (x);
+ const enum rtx_code code = GET_CODE (x);
+ const enum machine_mode mode = GET_MODE (x);
switch (GET_RTX_CLASS (code))
{