/* Fold a constant sub-tree into a single node for C-compiler
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
-This file is part of GNU CC.
+This file is part of GCC.
-GNU CC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
-GNU CC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
/*@@ This file should be rewritten to use an arbitrary precision
@@ representation for "struct tree_int_cst" and "struct tree_real_cst".
#include "config.h"
#include "system.h"
-#include <setjmp.h>
#include "flags.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "ggc.h"
+#include "hashtab.h"
static void encode PARAMS ((HOST_WIDE_INT *,
			    unsigned HOST_WIDE_INT,
			    HOST_WIDE_INT));
static void decode PARAMS ((HOST_WIDE_INT *,
unsigned HOST_WIDE_INT *,
HOST_WIDE_INT *));
+#ifndef REAL_ARITHMETIC
+static void exact_real_inverse_1 PARAMS ((PTR));
+#endif
static tree negate_expr PARAMS ((tree));
static tree split_tree PARAMS ((tree, enum tree_code, tree *, tree *,
int));
static tree associate_trees PARAMS ((tree, tree, enum tree_code, tree));
-static tree int_const_binop PARAMS ((enum tree_code, tree, tree, int, int));
+static tree int_const_binop PARAMS ((enum tree_code, tree, tree, int));
static void const_binop_1 PARAMS ((PTR));
static tree const_binop PARAMS ((enum tree_code, tree, tree, int));
+static hashval_t size_htab_hash PARAMS ((const void *));
+static int size_htab_eq PARAMS ((const void *, const void *));
static void fold_convert_1 PARAMS ((PTR));
static tree fold_convert PARAMS ((tree, tree));
static enum tree_code invert_tree_comparison PARAMS ((enum tree_code));
static int multiple_of_p PARAMS ((tree, tree, tree));
static tree constant_boolean_node PARAMS ((int, tree));
static int count_cond PARAMS ((tree, int));
-
+static tree fold_binary_op_with_conditional_arg
+ PARAMS ((enum tree_code, tree, tree, tree, int));
+
#ifndef BRANCH_COST
#define BRANCH_COST 1
#endif
HOST_WIDE_INT arg1[4];
HOST_WIDE_INT arg2[4];
HOST_WIDE_INT prod[4 * 2];
- register unsigned HOST_WIDE_INT carry;
- register int i, j, k;
+ unsigned HOST_WIDE_INT carry;
+ int i, j, k;
unsigned HOST_WIDE_INT toplow, neglow;
HOST_WIDE_INT tophigh, neghigh;
HOST_WIDE_INT *hv;
int arith;
{
+ unsigned HOST_WIDE_INT signmask;
+
if (count < 0)
{
rshift_double (l1, h1, -count, prec, lv, hv, arith);
| (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
*lv = l1 << count;
}
+
+ /* Sign extend all bits that are beyond the precision. */
+
+ signmask = -((prec > HOST_BITS_PER_WIDE_INT
+ ? (*hv >> (prec - HOST_BITS_PER_WIDE_INT - 1))
+ : (*lv >> (prec - 1))) & 1);
+
+ if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
+ ;
+ else if (prec >= HOST_BITS_PER_WIDE_INT)
+ {
+ *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+ *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
+ }
+ else
+ {
+ *hv = signmask;
+ *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
+ *lv |= signmask << prec;
+ }
}
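
/* Editorial sketch, not part of fold-const.c: the sign-extension idiom added
   above, reduced to a single `unsigned long' under the assumption that
   0 < PREC < the number of bits in a long.  The name sign_extend_to_prec is
   invented for illustration.  Bit PREC - 1 is copied into every higher bit,
   so the word reads back as a PREC-bit signed value.  */

static unsigned long
sign_extend_to_prec (val, prec)
     unsigned long val;
     unsigned int prec;
{
  /* All ones if bit PREC - 1 of VAL is set, all zeros otherwise.  */
  unsigned long signmask = -((val >> (prec - 1)) & 1);

  /* Clear every bit above the precision, then refill them from the mask.  */
  val &= ~((unsigned long) -1 << prec);
  val |= signmask << prec;
  return val;
}
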
/* Shift the doubleword integer in L1, H1 right by COUNT places
rshift_double (l1, h1, count, prec, lv, hv, arith)
unsigned HOST_WIDE_INT l1;
HOST_WIDE_INT h1, count;
- unsigned int prec ATTRIBUTE_UNUSED;
+ unsigned int prec;
unsigned HOST_WIDE_INT *lv;
HOST_WIDE_INT *hv;
int arith;
{
/* Shifting by the host word size is undefined according to the
ANSI standard, so we must handle this as a special case. */
- *hv = signmask;
- *lv = signmask;
+ *hv = 0;
+ *lv = 0;
}
else if (count >= HOST_BITS_PER_WIDE_INT)
{
- *hv = signmask;
- *lv = ((signmask << (2 * HOST_BITS_PER_WIDE_INT - count - 1) << 1)
- | ((unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT)));
+ *hv = 0;
+ *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
}
else
{
+ *hv = (unsigned HOST_WIDE_INT) h1 >> count;
*lv = ((l1 >> count)
| ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
- *hv = ((signmask << (HOST_BITS_PER_WIDE_INT - count))
- | ((unsigned HOST_WIDE_INT) h1 >> count));
+ }
+
+ /* Zero / sign extend all bits that are beyond the precision. */
+
+ if (count >= (HOST_WIDE_INT)prec)
+ {
+ *hv = signmask;
+ *lv = signmask;
+ }
+ else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
+ ;
+ else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
+ {
+ *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
+ *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
+ }
+ else
+ {
+ *hv = signmask;
+ *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
+ *lv |= signmask << (prec - count);
}
}
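
/* Editorial note, not part of the sources: the extension step above makes
   the doubleword result consistent with a PREC-bit operand.  When the shift
   is logical (SIGNMASK zero) any bits at or above position PREC - COUNT are
   cleared; when it is arithmetic they are filled with copies of the sign
   bit, so the result is again sign extended beyond the precision.  */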
\f
CODE is a tree code for a kind of division, one of
TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
- It controls how the quotient is rounded to a integer.
+ It controls how the quotient is rounded to an integer.
Return nonzero if the operation overflows.
UNS nonzero says do unsigned division. */
int quo_neg = 0;
HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
HOST_WIDE_INT den[4], quo[4];
- register int i, j;
+ int i, j;
unsigned HOST_WIDE_INT work;
unsigned HOST_WIDE_INT carry = 0;
unsigned HOST_WIDE_INT lnum = lnum_orig;
else
quo_est = BASE - 1;
- /* Refine quo_est so it's usually correct, and at most one high. */
+ /* Refine quo_est so it's usually correct, and at most one high. */
tmp = work - quo_est * den[den_hi_sig];
if (tmp < BASE
&& (den[den_hi_sig - 1] * quo_est
unsigned sign : 1;
unsigned exponent : 11;
unsigned mantissa1 : 20;
- unsigned mantissa2;
+ unsigned mantissa2 : 32;
} little_endian;
struct {
- unsigned mantissa2;
+ unsigned mantissa2 : 32;
unsigned mantissa1 : 20;
unsigned exponent : 11;
unsigned sign : 1;
unsigned sign : 1;
unsigned exponent : 11;
unsigned mantissa1 : 20;
- unsigned mantissa2;
+ unsigned mantissa2 : 32;
} little_endian;
struct {
- unsigned mantissa2;
+ unsigned mantissa2 : 32;
unsigned mantissa1 : 20;
unsigned exponent : 11;
unsigned sign : 1;
unsigned sign : 1;
unsigned exponent : 11;
unsigned mantissa1 : 20;
- unsigned mantissa2;
+ unsigned mantissa2 : 32;
} little_endian;
struct {
- unsigned mantissa2;
+ unsigned mantissa2 : 32;
unsigned mantissa1 : 20;
unsigned exponent : 11;
unsigned sign : 1;
/* Try to change R into its exact multiplicative inverse in machine mode
MODE. Return nonzero function value if successful. */
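+
+/* Argument block handed to exact_real_inverse_1 through do_float_handler,
+   so that a floating point trap raised while computing the inverse is
+   caught and reported back to the caller as failure.  */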
+struct exact_real_inverse_args
+{
+ REAL_VALUE_TYPE *r;
+ enum machine_mode mode;
+ int success;
+};
-int
-exact_real_inverse (mode, r)
- enum machine_mode mode;
- REAL_VALUE_TYPE *r;
+static void
+exact_real_inverse_1 (p)
+ PTR p;
{
- jmp_buf float_error;
+ struct exact_real_inverse_args *args =
+ (struct exact_real_inverse_args *) p;
+
+ enum machine_mode mode = args->mode;
+ REAL_VALUE_TYPE *r = args->r;
+
union
- {
- double d;
- unsigned short i[4];
- }x, t, y;
+ {
+ double d;
+ unsigned short i[4];
+ }
+ x, t, y;
#ifdef CHECK_FLOAT_VALUE
int i;
#endif
- /* Usually disable if bounds checks are not reliable. */
- if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT) && !flag_pretend_float)
- return 0;
-
/* Set array index to the less significant bits in the unions, depending
- on the endian-ness of the host doubles.
- Disable if insufficient information on the data structure. */
-#if HOST_FLOAT_FORMAT == UNKNOWN_FLOAT_FORMAT
- return 0;
+ on the endian-ness of the host doubles. */
+#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT \
+ || HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
+# define K 2
#else
-#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
-#define K 2
-#else
-#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
-#define K 2
-#else
-#define K (2 * HOST_FLOAT_WORDS_BIG_ENDIAN)
-#endif
-#endif
+# define K (2 * HOST_FLOAT_WORDS_BIG_ENDIAN)
#endif
- if (setjmp (float_error))
- {
- /* Don't do the optimization if there was an arithmetic error. */
-fail:
- set_float_handler (NULL_PTR);
- return 0;
- }
- set_float_handler (float_error);
-
/* Domain check the argument. */
x.d = *r;
if (x.d == 0.0)
#endif
/* Output the reciprocal and return success flag. */
- set_float_handler (NULL_PTR);
*r = y.d;
- return 1;
+ args->success = 1;
+ return;
+
+ fail:
+ args->success = 0;
+ return;
+
+#undef K
+}
+
+
+int
+exact_real_inverse (mode, r)
+ enum machine_mode mode;
+ REAL_VALUE_TYPE *r;
+{
+ struct exact_real_inverse_args args;
+
+ /* Disable if insufficient information on the data structure. */
+#if HOST_FLOAT_FORMAT == UNKNOWN_FLOAT_FORMAT
+ return 0;
+#endif
+
+ /* Usually disable if bounds checks are not reliable. */
+ if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT) && !flag_pretend_float)
+ return 0;
+
+ args.mode = mode;
+ args.r = r;
+
+ if (do_float_handler (exact_real_inverse_1, (PTR) &args))
+ return args.success;
+ return 0;
}
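
/* Editorial note, not part of the sources: exact_real_inverse is used by
   the RDIV_EXPR folding below, which rewrites a division by a REAL_CST as a
   multiplication by its reciprocal only when that reciprocal is exact (for
   example 1/2.0 == 0.5 exactly), so the rewrite cannot change the result.  */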
/* Convert C99 hexadecimal floating point string constant S. Return
REAL_VALUE_TYPE
real_hex_to_f (s, mode)
- char *s;
+ const char *s;
enum machine_mode mode;
{
REAL_VALUE_TYPE ip;
- char *p = s;
+ const char *p = s;
unsigned HOST_WIDE_INT low, high;
int shcount, nrmcount, k;
int sign, expsign, isfloat;
shcount = 0;
while ((c = *p) != '\0')
{
- if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F')
- || (c >= 'a' && c <= 'f'))
+ if (ISXDIGIT (c))
{
- k = c & CHARMASK;
- if (k >= 'a' && k <= 'f')
- k = k - 'a' + 10;
- else if (k >= 'A')
- k = k - 'A' + 10;
- else
- k = k - '0';
+ k = hex_value (c & CHARMASK);
if ((high & 0xf0000000) == 0)
{
case MINUS_EXPR:
/* - (A - B) -> B - A */
- if (! FLOAT_TYPE_P (type) || flag_fast_math)
+ if (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
return convert (type,
fold (build (MINUS_EXPR, TREE_TYPE (t),
TREE_OPERAND (t, 1),
break;
}
- return convert (type, build1 (NEGATE_EXPR, TREE_TYPE (t), t));
+ return convert (type, fold (build1 (NEGATE_EXPR, TREE_TYPE (t), t)));
}
\f
/* Split a tree IN into constant, literal and variable parts that could be
*conp = 0;
*litp = 0;
- /* Strip any conversions that don't change the machine mode or signedness. */
+ /* Strip any conversions that don't change the machine mode or signedness. */
STRIP_SIGN_NOPS (in);
if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST)
*conp = op1, neg_conp_p = neg1_p, op1 = 0;
/* If we haven't dealt with either operand, this is not a case we can
- decompose. Otherwise, VAR is either of the ones remaining, if any. */
+ decompose. Otherwise, VAR is either of the ones remaining, if any. */
if (op0 != 0 && op1 != 0)
var = in;
else if (op0 != 0)
/* Combine two integer constants ARG1 and ARG2 under operation CODE
to produce a new constant.
- If NOTRUNC is nonzero, do not truncate the result to fit the data type.
- If FORSIZE is nonzero, compute overflow for unsigned types. */
+ If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
static tree
-int_const_binop (code, arg1, arg2, notrunc, forsize)
+int_const_binop (code, arg1, arg2, notrunc)
enum tree_code code;
- register tree arg1, arg2;
- int notrunc, forsize;
+ tree arg1, arg2;
+ int notrunc;
{
unsigned HOST_WIDE_INT int1l, int2l;
HOST_WIDE_INT int1h, int2h;
HOST_WIDE_INT hi;
unsigned HOST_WIDE_INT garbagel;
HOST_WIDE_INT garbageh;
- register tree t;
- int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
+ tree t;
+ tree type = TREE_TYPE (arg1);
+ int uns = TREE_UNSIGNED (type);
+ int is_sizetype
+ = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
int overflow = 0;
int no_overflow = 0;
/* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard
interpretation ruling is needed. */
- lshift_double (int1l, int1h, int2l, TYPE_PRECISION (TREE_TYPE (arg1)),
+ lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
&low, &hi, !uns);
no_overflow = 1;
break;
case RROTATE_EXPR:
int2l = - int2l;
case LROTATE_EXPR:
- lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (TREE_TYPE (arg1)),
+ lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
&low, &hi);
break;
break;
}
- /* ... fall through ... */
+ /* ... fall through ... */
case ROUND_DIV_EXPR:
if (int2h == 0 && int2l == 1)
low = 1, hi = 0;
break;
}
- overflow = div_and_round_double (code, uns,
- int1l, int1h, int2l, int2h,
+ overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
&low, &hi, &garbagel, &garbageh);
break;
break;
}
- /* ... fall through ... */
+ /* ... fall through ... */
case ROUND_MOD_EXPR:
overflow = div_and_round_double (code, uns,
abort ();
}
- if (forsize && hi == 0 && low < 10000
+  /* If this is for a sizetype, the value can be represented in one (signed)
+     HOST_WIDE_INT word, and there is no overflow, use size_int since it
+     caches constants.  */
+ if (is_sizetype
+ && ((hi == 0 && (HOST_WIDE_INT) low >= 0)
+ || (hi == -1 && (HOST_WIDE_INT) low < 0))
&& overflow == 0 && ! TREE_OVERFLOW (arg1) && ! TREE_OVERFLOW (arg2))
- return size_int_type_wide (low, TREE_TYPE (arg1));
+ return size_int_type_wide (low, type);
else
{
t = build_int_2 (low, hi);
}
TREE_OVERFLOW (t)
- = ((notrunc ? (!uns || forsize) && overflow
- : force_fit_type (t, (!uns || forsize) && overflow) && ! no_overflow)
+ = ((notrunc
+ ? (!uns || is_sizetype) && overflow
+ : (force_fit_type (t, (!uns || is_sizetype) && overflow)
+ && ! no_overflow))
| TREE_OVERFLOW (arg1)
| TREE_OVERFLOW (arg2));
/* If we're doing a size calculation, unsigned arithmetic does overflow.
So check if force_fit_type truncated the value. */
- if (forsize
+ if (is_sizetype
&& ! TREE_OVERFLOW (t)
&& (TREE_INT_CST_HIGH (t) != hi
|| TREE_INT_CST_LOW (t) != low))
static tree
const_binop (code, arg1, arg2, notrunc)
enum tree_code code;
- register tree arg1, arg2;
+ tree arg1, arg2;
int notrunc;
{
STRIP_NOPS (arg1);
STRIP_NOPS (arg2);
if (TREE_CODE (arg1) == INTEGER_CST)
- return int_const_binop (code, arg1, arg2, notrunc, 0);
+ return int_const_binop (code, arg1, arg2, notrunc);
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
if (TREE_CODE (arg1) == REAL_CST)
args.code = code;
if (do_float_handler (const_binop_1, (PTR) &args))
- /* Receive output from const_binop_1. */
+ /* Receive output from const_binop_1. */
t = args.t;
else
{
- /* We got an exception from const_binop_1. */
+ /* We got an exception from const_binop_1. */
t = copy_node (arg1);
overflow = 1;
}
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
if (TREE_CODE (arg1) == COMPLEX_CST)
{
- register tree type = TREE_TYPE (arg1);
- register tree r1 = TREE_REALPART (arg1);
- register tree i1 = TREE_IMAGPART (arg1);
- register tree r2 = TREE_REALPART (arg2);
- register tree i2 = TREE_IMAGPART (arg2);
- register tree t;
+ tree type = TREE_TYPE (arg1);
+ tree r1 = TREE_REALPART (arg1);
+ tree i1 = TREE_IMAGPART (arg1);
+ tree r2 = TREE_REALPART (arg2);
+ tree i2 = TREE_IMAGPART (arg2);
+ tree t;
switch (code)
{
case RDIV_EXPR:
{
- register tree magsquared
+ tree magsquared
= const_binop (PLUS_EXPR,
const_binop (MULT_EXPR, r2, r2, notrunc),
const_binop (MULT_EXPR, i2, i2, notrunc),
}
return 0;
}
+
+/* These are the hash table functions for the hash table of INTEGER_CST
+ nodes of a sizetype. */
+
+/* Return the hash code for X, an INTEGER_CST.  */
+
+static hashval_t
+size_htab_hash (x)
+ const void *x;
+{
+ tree t = (tree) x;
+
+ return (TREE_INT_CST_HIGH (t) ^ TREE_INT_CST_LOW (t)
+ ^ (hashval_t) ((long) TREE_TYPE (t) >> 3)
+ ^ (TREE_OVERFLOW (t) << 20));
+}
+
+/* Return non-zero if the value represented by *X (an INTEGER_CST tree node)
+   is the same as that given by *Y, which is also an INTEGER_CST node.  */
+
+static int
+size_htab_eq (x, y)
+ const void *x;
+ const void *y;
+{
+ tree xt = (tree) x;
+ tree yt = (tree) y;
+
+ return (TREE_INT_CST_HIGH (xt) == TREE_INT_CST_HIGH (yt)
+ && TREE_INT_CST_LOW (xt) == TREE_INT_CST_LOW (yt)
+ && TREE_TYPE (xt) == TREE_TYPE (yt)
+ && TREE_OVERFLOW (xt) == TREE_OVERFLOW (yt));
+}
\f
/* Return an INTEGER_CST with value whose low-order HOST_BITS_PER_WIDE_INT
bits are given by NUMBER and of the sizetype represented by KIND. */
HOST_WIDE_INT number;
tree type;
{
- /* Type-size nodes already made for small sizes. */
- static tree size_table[2048 + 1];
- static int init_p = 0;
- tree t;
+ static htab_t size_htab = 0;
+ static tree new_const = 0;
+ PTR *slot;
- if (! init_p)
+ if (size_htab == 0)
{
- ggc_add_tree_root ((tree *) size_table,
- sizeof size_table / sizeof (tree));
- init_p = 1;
+ size_htab = htab_create (1024, size_htab_hash, size_htab_eq, NULL);
+ ggc_add_deletable_htab (size_htab, NULL, NULL);
+ new_const = make_node (INTEGER_CST);
+ ggc_add_tree_root (&new_const, 1);
}
- /* If this is a positive number that fits in the table we use to hold
- cached entries, see if it is already in the table and put it there
- if not. */
- if (number >= 0 && number < (int) ARRAY_SIZE (size_table))
+ /* Adjust NEW_CONST to be the constant we want. If it's already in the
+ hash table, we return the value from the hash table. Otherwise, we
+ place that in the hash table and make a new node for the next time. */
+ TREE_INT_CST_LOW (new_const) = number;
+ TREE_INT_CST_HIGH (new_const) = number < 0 ? -1 : 0;
+ TREE_TYPE (new_const) = type;
+ TREE_OVERFLOW (new_const) = TREE_CONSTANT_OVERFLOW (new_const)
+ = force_fit_type (new_const, 0);
+
+ slot = htab_find_slot (size_htab, new_const, INSERT);
+ if (*slot == 0)
{
- if (size_table[number] != 0)
- for (t = size_table[number]; t != 0; t = TREE_CHAIN (t))
- if (TREE_TYPE (t) == type)
- return t;
-
- t = build_int_2 (number, 0);
- TREE_TYPE (t) = type;
- TREE_CHAIN (t) = size_table[number];
- size_table[number] = t;
+ tree t = new_const;
+ *slot = (PTR) new_const;
+ new_const = make_node (INTEGER_CST);
return t;
}
-
- t = build_int_2 (number, number < 0 ? -1 : 0);
- TREE_TYPE (t) = type;
- TREE_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (t) = force_fit_type (t, 0);
- return t;
+ else
+ return (tree) *slot;
}
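
/* Editorial sketch, not GCC code: a minimal stand-alone version of the
   hash-consing scheme used by size_int_type_wide above.  A single scratch
   node is overwritten with the wanted value on every call; only when the
   lookup misses does it become a permanent table entry, and a fresh scratch
   node is then allocated for the next call.  Every name below (struct cst,
   cst_intern, ...) is invented for illustration, and the fixed-size table
   neither resizes nor frees anything.  */

#include <stdlib.h>

struct cst { long low; int type; };

#define NSLOTS 1024
static struct cst *slots[NSLOTS];
static struct cst *scratch;

static unsigned int
cst_hash (c)
     const struct cst *c;
{
  return (unsigned int) c->low ^ (unsigned int) c->type;
}

static int
cst_eq (a, b)
     const struct cst *a;
     const struct cst *b;
{
  return a->low == b->low && a->type == b->type;
}

static struct cst *
cst_intern (low, type)
     long low;
     int type;
{
  unsigned int i;

  if (scratch == 0)
    scratch = (struct cst *) malloc (sizeof (struct cst));

  scratch->low = low;
  scratch->type = type;

  /* Open addressing with linear probing; assumes the table never fills.  */
  for (i = cst_hash (scratch) % NSLOTS; slots[i] != 0; i = (i + 1) % NSLOTS)
    if (cst_eq (slots[i], scratch))
      return slots[i];

  /* Not found: the scratch node becomes the canonical entry, like NEW_CONST
     above, and a new scratch node is made for the next request.  */
  slots[i] = scratch;
  scratch = (struct cst *) malloc (sizeof (struct cst));
  return slots[i];
}
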
/* Combine operands OP1 and OP2 with arithmetic operation CODE. CODE
return arg1;
/* Handle general case of two integer constants. */
- return int_const_binop (code, arg0, arg1, 0, 1);
+ return int_const_binop (code, arg0, arg1, 0);
}
if (arg0 == error_mark_node || arg1 == error_mark_node)
/* This structure is used to communicate arguments to fold_convert_1. */
struct fc_args
{
- tree arg1; /* Input: value to convert. */
- tree type; /* Input: type to convert value to. */
- tree t; /* Ouput: result of conversion. */
+ tree arg1; /* Input: value to convert. */
+ tree type; /* Input: type to convert value to. */
+ tree t; /* Output: result of conversion. */
};
/* Function to convert floating-point constants, protected by floating
static tree
fold_convert (t, arg1)
- register tree t;
- register tree arg1;
+ tree t;
+ tree arg1;
{
- register tree type = TREE_TYPE (t);
+ tree type = TREE_TYPE (t);
int overflow = 0;
if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))
/* If we are trying to make a sizetype for a small integer, use
size_int to pick up cached types to reduce duplicate nodes. */
if (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
+ && !TREE_CONSTANT_OVERFLOW (arg1)
&& compare_tree_int (arg1, 10000) < 0)
return size_int_type_wide (TREE_INT_CST_LOW (arg1), type);
case 'r':
/* If either of the pointer (or reference) expressions we are dereferencing
- contain a side effect, these cannot be equal. */
+ contain a side effect, these cannot be equal. */
if (TREE_SIDE_EFFECTS (arg0)
|| TREE_SIDE_EFFECTS (arg1))
return 0;
case COMPONENT_REF:
case ARRAY_REF:
+ case ARRAY_RANGE_REF:
return (operand_equal_p (TREE_OPERAND (arg0, 0),
TREE_OPERAND (arg1, 0), 0)
&& operand_equal_p (TREE_OPERAND (arg0, 1),
if (TREE_CODE_CLASS (code) == '<')
{
if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
- && !flag_fast_math && code != NE_EXPR && code != EQ_EXPR)
+ && !flag_unsafe_math_optimizations
+ && code != NE_EXPR
+ && code != EQ_EXPR)
return build1 (TRUTH_NOT_EXPR, type, arg);
else
return build (invert_tree_comparison (code), type,
enum machine_mode lmode, rmode, nmode;
int lunsignedp, runsignedp;
int lvolatilep = 0, rvolatilep = 0;
- unsigned int alignment;
tree linner, rinner = NULL_TREE;
tree mask;
tree offset;
do anything if the inner expression is a PLACEHOLDER_EXPR since we
then will no longer be able to replace it. */
linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
- &lunsignedp, &lvolatilep, &alignment);
+ &lunsignedp, &lvolatilep);
if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
|| offset != 0 || TREE_CODE (linner) == PLACEHOLDER_EXPR)
return 0;
if (!const_p)
{
/* If this is not a constant, we can only do something if bit positions,
- sizes, and signedness are the same. */
+ sizes, and signedness are the same. */
rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode,
- &runsignedp, &rvolatilep, &alignment);
+ &runsignedp, &rvolatilep);
if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
|| lunsignedp != runsignedp || offset != 0
convert (unsigned_type, rhs),
size_int (lbitsize), 0)))
{
- warning ("comparison is always %d due to width of bitfield",
+ warning ("comparison is always %d due to width of bit-field",
code == NE_EXPR);
return convert (compare_type,
(code == NE_EXPR
size_int (lbitsize - 1), 0);
if (! integer_zerop (tem) && ! integer_all_onesp (tem))
{
- warning ("comparison is always %d due to width of bitfield",
+ warning ("comparison is always %d due to width of bit-field",
code == NE_EXPR);
return convert (compare_type,
(code == NE_EXPR
tree mask, inner, offset;
tree unsigned_type;
unsigned int precision;
- unsigned int alignment;
/* All the optimizations using this function assume integer fields.
There are problems with FP fields since the type_for_size call
}
inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
- punsignedp, pvolatilep, &alignment);
+ punsignedp, pvolatilep);
if ((inner == exp && and_mask == 0)
|| *pbitsize < 0 || offset != 0
|| TREE_CODE (inner) == PLACEHOLDER_EXPR)
the same. But, this is computer arithmetic, where numbers are finite.
We can therefore make the transformation of any unbounded range with
the value Z, Z being greater than any representable number. This permits
- us to treat unbounded ranges as equal. */
+ us to treat unbounded ranges as equal. */
sgn0 = arg0 != 0 ? 0 : (upper0_p ? 1 : -1);
sgn1 = arg1 != 0 ? 0 : (upper1_p ? 1 : -1);
switch (code)
constant. */
t1 = extract_muldiv (op0, c, code, wide_type);
t2 = extract_muldiv (op1, c, code, wide_type);
- if (t1 != 0 && t2 != 0)
+ if (t1 != 0 && t2 != 0
+ && (code == MULT_EXPR
+ /* If not multiplication, we can only do this if either operand
+ is divisible by c. */
+ || multiple_of_p (ctype, op0, c)
+ || multiple_of_p (ctype, op1, c)))
return fold (build (tcode, ctype, convert (ctype, t1),
convert (ctype, t2)));
{
if (code == CEIL_DIV_EXPR)
code = FLOOR_DIV_EXPR;
- else if (code == CEIL_MOD_EXPR)
- code = FLOOR_MOD_EXPR;
else if (code == FLOOR_DIV_EXPR)
code = CEIL_DIV_EXPR;
- else if (code == FLOOR_MOD_EXPR)
- code = CEIL_MOD_EXPR;
- else if (code != MULT_EXPR)
+ else if (code != MULT_EXPR
+ && code != CEIL_MOD_EXPR && code != FLOOR_MOD_EXPR)
break;
}
/* The last case is if we are a multiply. In that case, we can
apply the distributive law to commute the multiply and addition
- if the multiplication of the constants doesn't overflow. */
+ if the multiplication of the constants doesn't overflow. */
if (code == MULT_EXPR)
return fold (build (tcode, ctype, fold (build (code, ctype,
convert (ctype, op0),
&& integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
return omit_one_operand (type, integer_zero_node, op0);
- /* ... fall through ... */
+ /* ... fall through ... */
case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR: case EXACT_DIV_EXPR:
multiple of the other, in which case we replace this with either an
   operation of CODE or TCODE.
- If we have an unsigned type that is not a sizetype, we canot do
+ If we have an unsigned type that is not a sizetype, we cannot do
this since it will change the result if the original computation
overflowed. */
if ((! TREE_UNSIGNED (ctype)
tree expr;
int lim;
{
- int true, false;
+ int ctrue, cfalse;
if (TREE_CODE (expr) != COND_EXPR)
return 0;
else if (lim <= 0)
return 0;
- true = count_cond (TREE_OPERAND (expr, 1), lim - 1);
- false = count_cond (TREE_OPERAND (expr, 2), lim - 1 - true);
- return MIN (lim, 1 + true + false);
+ ctrue = count_cond (TREE_OPERAND (expr, 1), lim - 1);
+ cfalse = count_cond (TREE_OPERAND (expr, 2), lim - 1 - ctrue);
+ return MIN (lim, 1 + ctrue + cfalse);
}
+
+/* Transform `a + (b ? x : y)' into `b ? (a + x) : (a + y)'.
+   Transform `a + (x < y)' into `(x < y) ? (a + 1) : (a + 0)'.  Here
+ CODE corresponds to the `+', COND to the `(b ? x : y)' or `(x < y)'
+ expression, and ARG to `a'. If COND_FIRST_P is non-zero, then the
+ COND is the first argument to CODE; otherwise (as in the example
+ given here), it is the second argument. TYPE is the type of the
+ original expression. */
+
+static tree
+fold_binary_op_with_conditional_arg (code, type, cond, arg, cond_first_p)
+ enum tree_code code;
+ tree type;
+ tree cond;
+ tree arg;
+ int cond_first_p;
+{
+ tree test, true_value, false_value;
+ tree lhs = NULL_TREE;
+ tree rhs = NULL_TREE;
+ /* In the end, we'll produce a COND_EXPR. Both arms of the
+ conditional expression will be binary operations. The left-hand
+ side of the expression to be executed if the condition is true
+ will be pointed to by TRUE_LHS. Similarly, the right-hand side
+ of the expression to be executed if the condition is true will be
+ pointed to by TRUE_RHS. FALSE_LHS and FALSE_RHS are analogous --
+ but apply to the expression to be executed if the conditional is
+ false. */
+ tree *true_lhs;
+ tree *true_rhs;
+ tree *false_lhs;
+ tree *false_rhs;
+ /* These are the codes to use for the left-hand side and right-hand
+ side of the COND_EXPR. Normally, they are the same as CODE. */
+ enum tree_code lhs_code = code;
+ enum tree_code rhs_code = code;
+ /* And these are the types of the expressions. */
+ tree lhs_type = type;
+ tree rhs_type = type;
+
+ if (cond_first_p)
+ {
+ true_rhs = false_rhs = &arg;
+ true_lhs = &true_value;
+ false_lhs = &false_value;
+ }
+ else
+ {
+ true_lhs = false_lhs = &arg;
+ true_rhs = &true_value;
+ false_rhs = &false_value;
+ }
+
+ if (TREE_CODE (cond) == COND_EXPR)
+ {
+ test = TREE_OPERAND (cond, 0);
+ true_value = TREE_OPERAND (cond, 1);
+ false_value = TREE_OPERAND (cond, 2);
+      /* If this operand throws an exception, then it does not make
+ sense to try to perform a logical or arithmetic operation
+ involving it. Instead of building `a + throw 3' for example,
+ we simply build `a, throw 3'. */
+ if (VOID_TYPE_P (TREE_TYPE (true_value)))
+ {
+ lhs_code = COMPOUND_EXPR;
+ if (!cond_first_p)
+ lhs_type = void_type_node;
+ }
+ if (VOID_TYPE_P (TREE_TYPE (false_value)))
+ {
+ rhs_code = COMPOUND_EXPR;
+ if (!cond_first_p)
+ rhs_type = void_type_node;
+ }
+ }
+ else
+ {
+ tree testtype = TREE_TYPE (cond);
+ test = cond;
+ true_value = convert (testtype, integer_one_node);
+ false_value = convert (testtype, integer_zero_node);
+ }
+
+ /* If ARG is complex we want to make sure we only evaluate
+ it once. Though this is only required if it is volatile, it
+ might be more efficient even if it is not. However, if we
+ succeed in folding one part to a constant, we do not need
+ to make this SAVE_EXPR. Since we do this optimization
+     primarily to see if we do end up with a constant and this
+ SAVE_EXPR interferes with later optimizations, suppressing
+ it when we can is important.
+
+ If we are not in a function, we can't make a SAVE_EXPR, so don't
+ try to do so. Don't try to see if the result is a constant
+ if an arm is a COND_EXPR since we get exponential behavior
+ in that case. */
+
+ if (TREE_CODE (arg) != SAVE_EXPR && ! TREE_CONSTANT (arg)
+ && global_bindings_p () == 0
+ && ((TREE_CODE (arg) != VAR_DECL
+ && TREE_CODE (arg) != PARM_DECL)
+ || TREE_SIDE_EFFECTS (arg)))
+ {
+ if (TREE_CODE (true_value) != COND_EXPR)
+ lhs = fold (build (lhs_code, lhs_type, *true_lhs, *true_rhs));
+
+ if (TREE_CODE (false_value) != COND_EXPR)
+ rhs = fold (build (rhs_code, rhs_type, *false_lhs, *false_rhs));
+
+ if ((lhs == 0 || ! TREE_CONSTANT (lhs))
+ && (rhs == 0 || !TREE_CONSTANT (rhs)))
+ arg = save_expr (arg), lhs = rhs = 0;
+ }
+
+ if (lhs == 0)
+ lhs = fold (build (lhs_code, lhs_type, *true_lhs, *true_rhs));
+ if (rhs == 0)
+ rhs = fold (build (rhs_code, rhs_type, *false_lhs, *false_rhs));
+
+ test = fold (build (COND_EXPR, type, test, lhs, rhs));
+
+ if (TREE_CODE (arg) == SAVE_EXPR)
+ return build (COMPOUND_EXPR, type,
+ convert (void_type_node, arg),
+ strip_compound_expr (test, arg));
+ else
+ return convert (type, test);
+}
+
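/* Editorial illustration, not part of the sources: with COND_FIRST_P zero
   the routine above rewrites `i + (p ? 1 : 2)' as `p ? i + 1 : i + 2'; when
   `i' is a complex or side-effecting operand (and we are inside a function)
   it is wrapped in a SAVE_EXPR first, so that it is evaluated only once.  */
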
\f
/* Perform constant folding and related simplification of EXPR.
The related simplifications include x*1 => x, x*0 => 0, etc.,
fold (expr)
tree expr;
{
- register tree t = expr;
+ tree t = expr;
tree t1 = NULL_TREE;
tree tem;
tree type = TREE_TYPE (expr);
- register tree arg0 = NULL_TREE, arg1 = NULL_TREE;
- register enum tree_code code = TREE_CODE (t);
- register int kind = TREE_CODE_CLASS (code);
+ tree arg0 = NULL_TREE, arg1 = NULL_TREE;
+ enum tree_code code = TREE_CODE (t);
+ int kind = TREE_CODE_CLASS (code);
int invert;
/* WINS will be nonzero when the switch is done
if all operands are constant. */
/* Don't try to process an RTL_EXPR since its operands aren't trees.
Likewise for a SAVE_EXPR that's already been evaluated. */
- if (code == RTL_EXPR || (code == SAVE_EXPR && SAVE_EXPR_RTL (t)) != 0)
+ if (code == RTL_EXPR || (code == SAVE_EXPR && SAVE_EXPR_RTL (t) != 0))
return t;
/* Return right away if a constant. */
}
else if (IS_EXPR_CODE_CLASS (kind) || kind == 'r')
{
- register int len = TREE_CODE_LENGTH (code);
- register int i;
+ int len = first_rtl_op (code);
+ int i;
for (i = 0; i < len; i++)
{
tree op = TREE_OPERAND (t, i);
&& (! TREE_SIDE_EFFECTS (arg0)
|| (global_bindings_p () == 0
&& ! contains_placeholder_p (arg0))))
- {
- tree test, true_value, false_value;
- tree lhs = 0, rhs = 0;
-
- if (TREE_CODE (arg1) == COND_EXPR)
- {
- test = TREE_OPERAND (arg1, 0);
- true_value = TREE_OPERAND (arg1, 1);
- false_value = TREE_OPERAND (arg1, 2);
- }
- else
- {
- tree testtype = TREE_TYPE (arg1);
- test = arg1;
- true_value = convert (testtype, integer_one_node);
- false_value = convert (testtype, integer_zero_node);
- }
-
- /* If ARG0 is complex we want to make sure we only evaluate
- it once. Though this is only required if it is volatile, it
- might be more efficient even if it is not. However, if we
- succeed in folding one part to a constant, we do not need
- to make this SAVE_EXPR. Since we do this optimization
- primarily to see if we do end up with constant and this
- SAVE_EXPR interferes with later optimizations, suppressing
- it when we can is important.
-
- If we are not in a function, we can't make a SAVE_EXPR, so don't
- try to do so. Don't try to see if the result is a constant
- if an arm is a COND_EXPR since we get exponential behavior
- in that case. */
-
- if (TREE_CODE (arg0) != SAVE_EXPR && ! TREE_CONSTANT (arg0)
- && global_bindings_p () == 0
- && ((TREE_CODE (arg0) != VAR_DECL
- && TREE_CODE (arg0) != PARM_DECL)
- || TREE_SIDE_EFFECTS (arg0)))
- {
- if (TREE_CODE (true_value) != COND_EXPR)
- lhs = fold (build (code, type, arg0, true_value));
-
- if (TREE_CODE (false_value) != COND_EXPR)
- rhs = fold (build (code, type, arg0, false_value));
-
- if ((lhs == 0 || ! TREE_CONSTANT (lhs))
- && (rhs == 0 || !TREE_CONSTANT (rhs)))
- arg0 = save_expr (arg0), lhs = rhs = 0;
- }
-
- if (lhs == 0)
- lhs = fold (build (code, type, arg0, true_value));
- if (rhs == 0)
- rhs = fold (build (code, type, arg0, false_value));
-
- test = fold (build (COND_EXPR, type, test, lhs, rhs));
-
- if (TREE_CODE (arg0) == SAVE_EXPR)
- return build (COMPOUND_EXPR, type,
- convert (void_type_node, arg0),
- strip_compound_expr (test, arg0));
- else
- return convert (type, test);
- }
-
+ return
+ fold_binary_op_with_conditional_arg (code, type, arg1, arg0,
+ /*cond_first_p=*/0);
else if (TREE_CODE (arg0) == COMPOUND_EXPR)
return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
&& (! TREE_SIDE_EFFECTS (arg1)
|| (global_bindings_p () == 0
&& ! contains_placeholder_p (arg1))))
- {
- tree test, true_value, false_value;
- tree lhs = 0, rhs = 0;
-
- if (TREE_CODE (arg0) == COND_EXPR)
- {
- test = TREE_OPERAND (arg0, 0);
- true_value = TREE_OPERAND (arg0, 1);
- false_value = TREE_OPERAND (arg0, 2);
- }
- else
- {
- tree testtype = TREE_TYPE (arg0);
- test = arg0;
- true_value = convert (testtype, integer_one_node);
- false_value = convert (testtype, integer_zero_node);
- }
-
- if (TREE_CODE (arg1) != SAVE_EXPR && ! TREE_CONSTANT (arg0)
- && global_bindings_p () == 0
- && ((TREE_CODE (arg1) != VAR_DECL
- && TREE_CODE (arg1) != PARM_DECL)
- || TREE_SIDE_EFFECTS (arg1)))
- {
- if (TREE_CODE (true_value) != COND_EXPR)
- lhs = fold (build (code, type, true_value, arg1));
-
- if (TREE_CODE (false_value) != COND_EXPR)
- rhs = fold (build (code, type, false_value, arg1));
-
- if ((lhs == 0 || ! TREE_CONSTANT (lhs))
- && (rhs == 0 || !TREE_CONSTANT (rhs)))
- arg1 = save_expr (arg1), lhs = rhs = 0;
- }
-
- if (lhs == 0)
- lhs = fold (build (code, type, true_value, arg1));
-
- if (rhs == 0)
- rhs = fold (build (code, type, false_value, arg1));
-
- test = fold (build (COND_EXPR, type, test, lhs, rhs));
- if (TREE_CODE (arg1) == SAVE_EXPR)
- return build (COMPOUND_EXPR, type,
- convert (void_type_node, arg1),
- strip_compound_expr (test, arg1));
- else
- return convert (type, test);
- }
+ return
+ fold_binary_op_with_conditional_arg (code, type, arg0, arg1,
+ /*cond_first_p=*/1);
}
else if (TREE_CODE_CLASS (code) == '<'
&& TREE_CODE (arg0) == COMPOUND_EXPR)
}
return fold_convert (t, arg0);
+ case VIEW_CONVERT_EXPR:
+ if (TREE_CODE (TREE_OPERAND (t, 0)) == VIEW_CONVERT_EXPR)
+ return build1 (VIEW_CONVERT_EXPR, type,
+ TREE_OPERAND (TREE_OPERAND (t, 0), 0));
+ return t;
+
#if 0 /* This loses on &"foo"[0]. */
case ARRAY_REF:
{
/* Convert - (a - b) to (b - a) for non-floating-point. */
else if (TREE_CODE (arg0) == MINUS_EXPR
- && (! FLOAT_TYPE_P (type) || flag_fast_math))
+ && (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations))
return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
TREE_OPERAND (arg0, 0));
{
if (TREE_CODE (arg0) == INTEGER_CST)
{
- if (! TREE_UNSIGNED (type)
- && TREE_INT_CST_HIGH (arg0) < 0)
+ /* If the value is unsigned, then the absolute value is
+ the same as the ordinary value. */
+ if (TREE_UNSIGNED (type))
+ return arg0;
+ /* Similarly, if the value is non-negative. */
+ else if (INT_CST_LT (integer_minus_one_node, arg0))
+ return arg0;
+ /* If the value is negative, then the absolute value is
+ its negation. */
+ else
{
unsigned HOST_WIDE_INT low;
HOST_WIDE_INT high;
}
/* In IEEE floating point, x+0 may not equal x. */
else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
- || flag_fast_math)
+ || flag_unsafe_math_optimizations)
&& real_zerop (arg1))
return non_lvalue (convert (type, arg0));
/* x+(-0) equals x, even for IEEE. */
/* (A << B) + (A >> (Z - B)) if A is unsigned and Z is the size of A
is a rotate of A by B bits. */
{
- register enum tree_code code0, code1;
+ enum tree_code code0, code1;
code0 = TREE_CODE (arg0);
code1 = TREE_CODE (arg1);
if (((code0 == RSHIFT_EXPR && code1 == LSHIFT_EXPR)
TREE_OPERAND (arg1, 0), 0)
&& TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
{
- register tree tree01, tree11;
- register enum tree_code code01, code11;
+ tree tree01, tree11;
+ enum tree_code code01, code11;
tree01 = TREE_OPERAND (arg0, 1);
tree11 = TREE_OPERAND (arg1, 1);
parentheses. Rather than remember where the parentheses were, we
don't associate floats at all. It shouldn't matter much. However,
associating multiplications is only very slightly inaccurate, so do
- that if -ffast-math is specified. */
+ that if -funsafe-math-optimizations is specified. */
if (! wins
&& (! FLOAT_TYPE_P (type)
- || (flag_fast_math && code != MULT_EXPR)))
+ || (flag_unsafe_math_optimizations && code == MULT_EXPR)))
{
tree var0, con0, lit0, var1, con1, lit1;
associate each group together, the constants with literals,
then the result with variables. This increases the chances of
literals being recombined later and of generating relocatable
- expressions for the sum of a constant and literal. */
+ expressions for the sum of a constant and literal. */
var0 = split_tree (arg0, code, &con0, &lit0, 0);
var1 = split_tree (arg1, code, &con1, &lit1, code == MINUS_EXPR);
}
else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
- || flag_fast_math)
+ || flag_unsafe_math_optimizations)
{
/* Except with IEEE floating point, 0-x equals -x. */
if (! wins && real_zerop (arg0))
Also note that operand_equal_p is always false if an operand
is volatile. */
- if ((! FLOAT_TYPE_P (type) || flag_fast_math)
+ if ((! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
&& operand_equal_p (arg0, arg1, 0))
return convert (type, integer_zero_node);
{
/* x*0 is 0, except for IEEE floating point. */
if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
- || flag_fast_math)
+ || flag_unsafe_math_optimizations)
&& real_zerop (arg1))
return omit_one_operand (type, arg1, arg0);
/* In IEEE floating point, x*1 is not equivalent to x for snans.
/* If ARG1 is a constant, we can convert this to a multiply by the
reciprocal. This does not have the same rounding properties,
- so only do this if -ffast-math. We can actually always safely
- do it if ARG1 is a power of two, but it's hard to tell if it is
- or not in a portable manner. */
+ so only do this if -funsafe-math-optimizations. We can actually
+ always safely do it if ARG1 is a power of two, but it's hard to
+ tell if it is or not in a portable manner. */
if (TREE_CODE (arg1) == REAL_CST)
{
- if (flag_fast_math
+ if (flag_unsafe_math_optimizations
&& 0 != (tem = const_binop (code, build_real (type, dconst1),
arg1, 0)))
return fold (build (MULT_EXPR, type, arg0, tem));
- /* Find the reciprocal if optimizing and the result is exact. */
+ /* Find the reciprocal if optimizing and the result is exact. */
else if (optimize)
{
REAL_VALUE_TYPE r;
}
}
}
+ /* Convert A/B/C to A/(B*C). */
+ if (flag_unsafe_math_optimizations
+ && TREE_CODE (arg0) == RDIV_EXPR)
+ {
+ return fold (build (RDIV_EXPR, type, TREE_OPERAND (arg0, 0),
+ build (MULT_EXPR, type, TREE_OPERAND (arg0, 1),
+ arg1)));
+ }
+ /* Convert A/(B/C) to (A/B)*C. */
+ if (flag_unsafe_math_optimizations
+ && TREE_CODE (arg1) == RDIV_EXPR)
+ {
+ return fold (build (MULT_EXPR, type,
+ build (RDIV_EXPR, type, arg0,
+ TREE_OPERAND (arg1, 0)),
+ TREE_OPERAND (arg1, 1)));
+ }
goto binary;
case TRUNC_DIV_EXPR:
truth and/or operations and the transformation will still be
valid. Also note that we only care about order for the
ANDIF and ORIF operators. If B contains side effects, this
- might change the truth-value of A. */
+ might change the truth-value of A. */
if (TREE_CODE (arg0) == TREE_CODE (arg1)
&& (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
|| TREE_CODE (arg0) == TRUTH_ORIF_EXPR
case EQ_EXPR:
case GE_EXPR:
case LE_EXPR:
- if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
+ if (! FLOAT_TYPE_P (TREE_TYPE (arg0)))
return constant_boolean_node (1, type);
code = EQ_EXPR;
TREE_SET_CODE (t, code);
case NE_EXPR:
/* For NE, we can only do this simplification if integer. */
- if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
+ if (FLOAT_TYPE_P (TREE_TYPE (arg0)))
break;
/* ... fall through ... */
case GT_EXPR:
}
else if (TREE_INT_CST_HIGH (arg1) == -1
- && (- TREE_INT_CST_LOW (arg1)
+ && (TREE_INT_CST_LOW (arg1)
== ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
&& ! TREE_UNSIGNED (TREE_TYPE (arg1)))
switch (TREE_CODE (t))
}
else if (TREE_INT_CST_HIGH (arg1) == 0
- && (TREE_INT_CST_LOW (arg1)
- == ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1)
- && TREE_UNSIGNED (TREE_TYPE (arg1)))
-
+ && (TREE_INT_CST_LOW (arg1)
+ == ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1)
+ && TREE_UNSIGNED (TREE_TYPE (arg1))
+ /* signed_type does not work on pointer types. */
+ && INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
switch (TREE_CODE (t))
{
case LE_EXPR:
default:
break;
}
+
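+	  /* Comparisons against the maximum value of an unsigned type
+	     (e.g. 255 when the operands are unsigned chars) can also be
+	     simplified: no value of the type is greater than it, and
+	     every value is less than or equal to it.  */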
+ else if (TREE_INT_CST_HIGH (arg1) == 0
+ && (TREE_INT_CST_LOW (arg1)
+ == ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1)
+ && TREE_UNSIGNED (TREE_TYPE (arg1)))
+ switch (TREE_CODE (t))
+ {
+ case GT_EXPR:
+ return omit_one_operand (type,
+ convert (type, integer_zero_node),
+ arg0);
+ case GE_EXPR:
+ TREE_SET_CODE (t, EQ_EXPR);
+ break;
+
+ case LE_EXPR:
+ return omit_one_operand (type,
+ convert (type, integer_one_node),
+ arg0);
+ case LT_EXPR:
+ TREE_SET_CODE (t, NE_EXPR);
+ break;
+
+ default:
+ break;
+ }
}
}
fold (build (code, type, imag0, imag1))));
}
+ /* Optimize comparisons of strlen vs zero to a compare of the
+ first character of the string vs zero. To wit,
+ strlen(ptr) == 0 => *ptr == 0
+ strlen(ptr) != 0 => *ptr != 0
+ Other cases should reduce to one of these two (or a constant)
+ due to the return value of strlen being unsigned. */
+ if ((code == EQ_EXPR || code == NE_EXPR)
+ && integer_zerop (arg1)
+ && TREE_CODE (arg0) == CALL_EXPR
+ && TREE_CODE (TREE_OPERAND (arg0, 0)) == ADDR_EXPR)
+ {
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0);
+ tree arglist;
+
+ if (TREE_CODE (fndecl) == FUNCTION_DECL
+ && DECL_BUILT_IN (fndecl)
+ && DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD
+ && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_STRLEN
+ && (arglist = TREE_OPERAND (arg0, 1))
+ && TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) == POINTER_TYPE
+ && ! TREE_CHAIN (arglist))
+ return fold (build (code, type,
+ build1 (INDIRECT_REF, char_type_node,
+					 TREE_VALUE (arglist)),
+ integer_zero_node));
+ }
+
/* From here on, the only cases we handle are when the result is
known to be a constant.
if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
&& (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
|| ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
- || flag_fast_math)
+ || flag_unsafe_math_optimizations)
&& operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
arg1, TREE_OPERAND (arg0, 1)))
{
return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom)
&& multiple_of_p (type, TREE_OPERAND (top, 1), bottom));
+ case LSHIFT_EXPR:
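+      /* TOP is X << N, i.e. X * (1 << N), so it is a multiple of BOTTOM
+	 whenever 1 << N itself is; for instance (x << 4) is a multiple
+	 of 8.  */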
+ if (TREE_CODE (TREE_OPERAND (top, 1)) == INTEGER_CST)
+ {
+ tree op1, t1;
+
+ op1 = TREE_OPERAND (top, 1);
+ /* const_binop may not detect overflow correctly,
+ so check for it explicitly here. */
+ if (TYPE_PRECISION (TREE_TYPE (size_one_node))
+ > TREE_INT_CST_LOW (op1)
+ && TREE_INT_CST_HIGH (op1) == 0
+ && 0 != (t1 = convert (type,
+ const_binop (LSHIFT_EXPR, size_one_node,
+ op1, 0)))
+ && ! TREE_OVERFLOW (t1))
+ return multiple_of_p (type, t1, bottom);
+ }
+ return 0;
+
case NOP_EXPR:
/* Can't handle conversions from non-integral or wider integral type. */
if ((TREE_CODE (TREE_TYPE (TREE_OPERAND (top, 0))) != INTEGER_TYPE)
< TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (top, 0)))))
return 0;
- /* .. fall through ... */
+      /* ... fall through ...  */
case SAVE_EXPR:
return multiple_of_p (type, TREE_OPERAND (top, 0), bottom);
case INTEGER_CST:
- if ((TREE_CODE (bottom) != INTEGER_CST)
- || (tree_int_cst_sgn (top) < 0)
- || (tree_int_cst_sgn (bottom) < 0))
+ if (TREE_CODE (bottom) != INTEGER_CST
+ || (TREE_UNSIGNED (type)
+ && (tree_int_cst_sgn (top) < 0
+ || tree_int_cst_sgn (bottom) < 0)))
return 0;
return integer_zerop (const_binop (TRUNC_MOD_EXPR,
top, bottom, 0));
{
switch (TREE_CODE (t))
{
+ case ABS_EXPR:
+ case FFS_EXPR:
+ return 1;
case INTEGER_CST:
return tree_int_cst_sgn (t) >= 0;
+ case TRUNC_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0))
+ && tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
+ case TRUNC_MOD_EXPR:
+ case CEIL_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0));
case COND_EXPR:
return tree_expr_nonnegative_p (TREE_OPERAND (t, 1))
&& tree_expr_nonnegative_p (TREE_OPERAND (t, 2));
+ case COMPOUND_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
+ case MIN_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0))
+ && tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
+ case MAX_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0))
+ || tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
+ case MODIFY_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
case BIND_EXPR:
return tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
+ case SAVE_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0));
+ case NON_LVALUE_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0));
case RTL_EXPR:
return rtl_expr_nonnegative_p (RTL_EXPR_RTL (t));