2010-08-12 Richard Guenther <rguenther@suse.de>
+ PR tree-optimization/45232
+ * tree-ssa-reassoc.c (can_reassociate_p): Disable re-association
+ for types with undefined overflow.
+ (reassociate_bb): Allow re-associating of bit and min/max
+ operations for types with undefined overflow.
+ * tree-ssa-forwprop.c (associate_plusminus): New function.
+ (tree_ssa_forward_propagate_single_use_vars): Call it.
+
+2010-08-12 Richard Guenther <rguenther@suse.de>
+
* tree-flow.h (struct ptr_info_def): Add align and misalign fields.
* tree-ssa-alias.c (get_ptr_info): Move ...
* tree-ssanames.c (get_ptr_info): ... here. Initialize
+2010-08-12 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/45232
+ * gcc.dg/tree-ssa/pr44133.c: Adjust warning location.
+ * gcc.dg/tree-ssa/loop-7.c: Adjust.
+ * gcc.dg/tree-ssa/reassoc-1.c: XFAIL.
+ * gcc.dg/tree-ssa/reassoc-20.c: Add reassoc-1.c variant with
+ unsigned arithmetic.
+ * gcc.dg/tree-ssa/reassoc-14.c: Use unsigned arithmetic.
+ * gcc.dg/tree-ssa/reassoc-15.c: Likewise.
+ * gcc.dg/tree-ssa/reassoc-18.c: Likewise.
+ * gcc.dg/tree-ssa/reassoc-2.c: XFAIL.
+ * gcc.dg/tree-ssa/reassoc-21.c: Add reassoc-2.c variant with
+ unsigned arithmetic.
+ * gcc.dg/tree-ssa/reassoc-6.c: XFAIL.
+ * gcc.dg/tree-ssa/reassoc-22.c: Add reassoc-6.c variant with
+ unsigned arithmetic.
+ * gcc.dg/tree-ssa/reassoc-7.c: Use unsigned arithmetic.
+ * gcc.dg/tree-ssa/reassoc-9.c: XFAIL.
+ * gcc.dg/tree-ssa/reassoc-23.c: Add reassoc-9.c variant with
+ unsigned arithmetic.
+ * gcc.dg/tree-ssa/ssa-pre-2.c: Adjust.
+ * gcc.dg/tree-ssa/negate.c: Adjust.
+ * gcc.dg/vect/vect-1.c: Adjust.
+ * gfortran.dg/reassoc_6.f: XFAIL.
+
2010-08-12 Jakub Jelinek <jakub@redhat.com>

 PR debug/45259
Calls to cst_fun2 and pure_fun2 should not be, since calling
with k = 0 may be invalid. */
-/* { dg-final { scan-tree-dump-times "Moving statement" 3 "lim1" } } */
+/* { dg-final { scan-tree-dump-times "Moving statement" 2 "lim1" } } */
/* { dg-final { cleanup-tree-dump "lim\[1-2\]" } } */
return y;
}
+/* We tested for reassociation to -(a + b) on the following, which
+ isn't a transform that makes things cheaper. With reassoc
+ no longer applying to types with undefined overflow we lost
+ this transform.
+
int g (int a, int b)
{
int x = -a;
return y;
}
-/* There should be two additions now. */
-/* { dg-final { scan-tree-dump-times "\\+" 2 "reassoc1"} } */
+*/
+
+/* There should be an addition now. */
+/* { dg-final { scan-tree-dump-times "\\+" 1 "reassoc1"} } */
/* { dg-final { cleanup-tree-dump "reassoc1" } } */
int foo (int l)
{
struct S s;
- s.j = l - 22; /* { dg-warning ".s\.i. is used uninitialized" } */
- return s.i + s.j;
+ s.j = l - 22;
+ return s.i + s.j; /* { dg-warning ".s\.i. is used uninitialized" } */
}
/* { dg-do compile } */
/* { dg-options "-O2 -fdump-tree-optimized" } */
+
int a, b, c, d;
extern int printf (const char *, ...);
int main(void)
printf ("%d %d\n", e, f);
}
-/* { dg-final { scan-tree-dump-times "b.._. \\\+ a.._." 1 "optimized"} } */
-/* { dg-final { scan-tree-dump-times " \\\+ " 2 "optimized"} } */
+/* We cannot reassociate these expressions because of undefined signed
+ integer overflow. Instead, the value-numberer has to be extended
+ to canonicalize these expressions. */
+
+/* { dg-final { scan-tree-dump-times "b.._. \\\+ a.._." 1 "optimized" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-times " \\\+ " 2 "optimized" { xfail *-*-* } } } */
/* { dg-final { cleanup-tree-dump "optimized" } } */
/* { dg-do compile } */
/* { dg-options "-O2 -fdump-tree-reassoc1" } */
-int test1 (int x, int y, int z, int weight)
+unsigned int test1 (unsigned int x, unsigned int y, unsigned int z,
+ unsigned int weight)
{
- int tmp1 = x * weight;
- int tmp2 = y * weight;
- int tmp3 = (x - y) * weight;
+ unsigned int tmp1 = x * weight;
+ unsigned int tmp2 = y * weight;
+ unsigned int tmp3 = (x - y) * weight;
return tmp1 + (tmp2 + tmp3);
}
-int test2 (int x, int y, int z, int weight)
+unsigned int test2 (unsigned int x, unsigned int y, unsigned int z,
+ unsigned int weight)
{
- int tmp1 = x * weight;
- int tmp2 = y * weight * weight;
- int tmp3 = z * weight * weight * weight;
+ unsigned int tmp1 = x * weight;
+ unsigned int tmp2 = y * weight * weight;
+ unsigned int tmp3 = z * weight * weight * weight;
return tmp1 + tmp2 + tmp3;
}
/* { dg-do compile } */
/* { dg-options "-O2 -fdump-tree-reassoc1" } */
-int test3 (int x, int y, int z, int weight, int w1, int w2, int w3)
+unsigned int test3 (unsigned int x, unsigned int y, unsigned int z,
+ unsigned int weight,
+ unsigned int w1, unsigned int w2, unsigned int w3)
{
- int wtmp1 = w1 * weight;
- int wtmp2 = w2 * weight;
- int wtmp3 = w3 * weight;
- int tmp1 = x * wtmp1;
- int tmp2 = y * wtmp2;
- int tmp3 = z * wtmp3;
+ unsigned int wtmp1 = w1 * weight;
+ unsigned int wtmp2 = w2 * weight;
+ unsigned int wtmp3 = w3 * weight;
+ unsigned int tmp1 = x * wtmp1;
+ unsigned int tmp2 = y * wtmp2;
+ unsigned int tmp3 = z * wtmp3;
return tmp1 + tmp2 + tmp3;
}
/* { dg-do compile } */
/* { dg-options "-O -fdump-tree-reassoc1" } */
-int
-ETree_nFactorEntriesInFront (int b, int m)
+unsigned int
+ETree_nFactorEntriesInFront (unsigned int b, unsigned int m)
{
- int nent = b*b + 2*b*m;
+ unsigned int nent = b*b + 2*b*m;
return nent;
}
/* { dg-do compile } */
/* { dg-options "-O2 -fdump-tree-optimized" } */
+
int f (int a0,int a1,int a2,int a3,int a4)
{
int b0, b1, b2, b3, b4,e;
return e;
}
-/* { dg-final { scan-tree-dump-times "return 0" 1 "optimized" } } */
+/* We can't reassociate the expressions due to undefined signed overflow. */
+
+/* { dg-final { scan-tree-dump-times "return 0" 1 "optimized" { xfail *-*-* } } } */
/* { dg-final { cleanup-tree-dump "optimized" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+
+unsigned int a, b, c, d;
+extern int printf (const char *, ...);
+int main(void)
+{
+ unsigned int e;
+ unsigned int f;
+ /* We should be able to transform these into the same expression, and only have two additions. */
+ e = a + b;
+ e = e + c;
+ f = c + a;
+ f = f + b;
+ printf ("%d %d\n", e, f);
+}
+
+/* { dg-final { scan-tree-dump-times "b.._. \\\+ a.._." 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-times " \\\+ " 2 "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+
+unsigned int f (unsigned int a0, unsigned int a1, unsigned int a2,
+ unsigned int a3, unsigned int a4)
+{
+ unsigned int b0, b1, b2, b3, b4, e;
+ /* this can be optimized to four additions... */
+ b4 = a4 + a3 + a2 + a1 + a0;
+ b3 = a3 + a2 + a1 + a0;
+ b2 = a2 + a1 + a0;
+ b1 = a1 + a0;
+ /* This is actually 0 */
+ e = b4 - b3 + b2 - b1 - a4 - a2;
+ return e;
+}
+
+/* { dg-final { scan-tree-dump-times "return 0" 1 "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-reassoc1" } */
+unsigned int foo(unsigned int a, unsigned int b, unsigned int c, unsigned int d)
+{
+ /* Should be transformed into a + c + 8 */
+ unsigned int e = a + 3;
+ unsigned int f = c + 5;
+ unsigned int g = e + f;
+ return g;
+}
+
+/* { dg-final { scan-tree-dump-times "\\\+ 8" 1 "reassoc1"} } */
+/* { dg-final { cleanup-tree-dump "reassoc1" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-reassoc1" } */
+
+unsigned int
+foo(unsigned int a, unsigned int b, unsigned int c, unsigned int d,
+ unsigned int e, unsigned int f, unsigned int g, unsigned int h)
+{
+ /* Should be transformed into e = 20 */
+ unsigned int i = (a + 9) + (c + 8);
+ unsigned int j = (-c + 1) + (-a + 2);
+
+ e = i + j;
+ return e;
+}
+
+/* { dg-final { scan-tree-dump-times "= 20" 1 "reassoc1"} } */
+/* { dg-final { cleanup-tree-dump "reassoc1" } } */
return g;
}
-/* { dg-final { scan-tree-dump-times "\\\+ 8" 1 "reassoc1"} } */
+/* We cannot re-associate the additions due to undefined signed overflow. */
+
+/* { dg-final { scan-tree-dump-times "\\\+ 8" 1 "reassoc1" { xfail *-*-* } } } */
/* { dg-final { cleanup-tree-dump "reassoc1" } } */
/* { dg-do compile } */
/* { dg-options "-O2 -fdump-tree-reassoc1" } */
-int main(int a, int b, int c, int d, int e, int f, int g, int h)
+
+unsigned int
+foo(unsigned int a, unsigned int b, unsigned int c, unsigned int d,
+ unsigned int e, unsigned int f, unsigned int g, unsigned int h)
{
/* Should be transformed into a + c + d + e + g + 15 */
- int i = (a + 9) + (c + d);
- int j = (e + 4) + (2 + g);
+ unsigned int i = (a + 9) + (c + d);
+ unsigned int j = (e + 4) + (2 + g);
e = i + j;
return e;
}
+
/* { dg-final { scan-tree-dump-times "\\\+ 15" 1 "reassoc1"} } */
/* { dg-final { cleanup-tree-dump "reassoc1" } } */
e = i + j;
return e;
}
-/* { dg-final { scan-tree-dump-times "= 20" 1 "reassoc1"} } */
+
+/* We can always re-associate to a final constant but the current
+ implementation does not allow easy roll-back without IL changes. */
+
+/* { dg-final { scan-tree-dump-times "= 20" 1 "reassoc1" { xfail *-*-* } } } */
/* { dg-final { cleanup-tree-dump "reassoc1" } } */
return v * t * u;
}
/* We should eliminate one computation of data_0 + data_3 along the
- main path, and one computation of v * i along the main path, causing
- two eliminations. */
-/* { dg-final { scan-tree-dump-times "Eliminated: 2" 1 "pre"} } */
+ main path. We cannot re-associate v * t * u due to undefined
+ signed overflow so we do not eliminate one computation of v * i along
+ the main path. */
+/* { dg-final { scan-tree-dump-times "Eliminated: 2" 1 "pre" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-times "Eliminated: 1" 1 "pre" } } */
/* { dg-final { cleanup-tree-dump "pre" } } */
char image[N][N];
char block[N][N];
- /* Not vectorizable yet (cross-iteration cycle). */
+ /* Vectorizable. */
diff = 0;
for (i = 0; i < N; i++) {
diff += (cb[i] - cc[i]);
ibar (&diff);
- /* Not vectorizable yet (outer-loop: not attempted.
- inner-loop: cross iteration cycle; multi-dimensional arrays). */
+ /* Vectorizable. */
diff = 0;
for (i = 0; i < N; i++) {
for (i = 0; i < N; i++) {
fbar (a);
}
-/* { dg-final { scan-tree-dump-times "vectorized 4 loops" 1 "vect" { target vect_extract_even_odd_wide } } } */
-/* { dg-final { scan-tree-dump-times "vectorized 3 loops" 1 "vect" { xfail vect_extract_even_odd_wide } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 6 loops" 1 "vect" { target vect_extract_even_odd_wide } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 5 loops" 1 "vect" { xfail vect_extract_even_odd_wide } } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
return
end
! Verify that offset of the first element is simplified
-! { dg-final { scan-tree-dump-not "~" "optimized" } }
+! While we understand how to combine x + ~x, IVOPTs now messes things
+! up by hiding that operation in casts to unsigned.
+! { dg-final { scan-tree-dump-not "~" "optimized" { xfail *-*-* } } }
! { dg-final { cleanup-tree-dump "optimized" } }
return;
}
+
+/* Perform re-associations of the plus or minus statement STMT that are
+ always permitted, i.e. that stay valid even for types with undefined
+ overflow: contracting negates and cancelling matching plus/minus
+ pairs. Saturating types are never touched. */
+
+static void
+associate_plusminus (gimple stmt)
+{
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ tree rhs2 = gimple_assign_rhs2 (stmt);
+ enum tree_code code = gimple_assign_rhs_code (stmt);
+ gimple_stmt_iterator gsi;
+ bool changed;
+
+ /* We can't reassociate at all for saturating types. */
+ if (TYPE_SATURATING (TREE_TYPE (rhs1)))
+ return;
+
+ /* First contract negates; iterate to a fixpoint, since contracting
+ one negate may expose another one feeding this statement. */
+ do
+ {
+ changed = false;
+
+ /* A +- (-B) -> A -+ B. */
+ if (TREE_CODE (rhs2) == SSA_NAME)
+ {
+ gimple def_stmt = SSA_NAME_DEF_STMT (rhs2);
+ if (is_gimple_assign (def_stmt)
+ && gimple_assign_rhs_code (def_stmt) == NEGATE_EXPR)
+ {
+ code = (code == MINUS_EXPR) ? PLUS_EXPR : MINUS_EXPR;
+ gimple_assign_set_rhs_code (stmt, code);
+ rhs2 = gimple_assign_rhs1 (def_stmt);
+ gimple_assign_set_rhs2 (stmt, rhs2);
+ gimple_set_modified (stmt, true);
+ changed = true;
+ }
+ }
+
+ /* (-A) + B -> B - A. */
+ if (TREE_CODE (rhs1) == SSA_NAME
+ && code == PLUS_EXPR)
+ {
+ gimple def_stmt = SSA_NAME_DEF_STMT (rhs1);
+ if (is_gimple_assign (def_stmt)
+ && gimple_assign_rhs_code (def_stmt) == NEGATE_EXPR)
+ {
+ code = MINUS_EXPR;
+ gimple_assign_set_rhs_code (stmt, code);
+ rhs1 = rhs2;
+ gimple_assign_set_rhs1 (stmt, rhs1);
+ rhs2 = gimple_assign_rhs1 (def_stmt);
+ gimple_assign_set_rhs2 (stmt, rhs2);
+ gimple_set_modified (stmt, true);
+ changed = true;
+ }
+ }
+ }
+ while (changed);
+
+ /* We can't reassociate floating-point or fixed-point plus or minus
+ because of saturation to +-Inf. */
+ if (FLOAT_TYPE_P (TREE_TYPE (rhs1))
+ || FIXED_POINT_TYPE_P (TREE_TYPE (rhs1)))
+ goto out;
+
+ /* Second match patterns that allow contracting a plus-minus pair
+ irrespective of overflow issues.
+
+ (A +- B) - A -> +- B
+ (A +- B) -+ B -> A
+ (CST +- A) +- CST -> CST +- A
+ (A + CST) +- CST -> A + CST
+ ~A + A -> -1
+ ~A + 1 -> -A
+ A - (A +- B) -> -+ B
+ A +- (B +- A) -> +- B
+ CST +- (CST +- A) -> CST +- A
+ CST +- (A +- CST) -> CST +- A
+ A + ~A -> -1
+
+ via commuting the addition and contracting operations to zero
+ by reassociation. */
+
+ /* The contractions below may replace the whole RHS of STMT, possibly
+ changing its operand count, so we need a statement iterator for
+ gimple_assign_set_rhs_with_ops. */
+ gsi = gsi_for_stmt (stmt);
+ if (TREE_CODE (rhs1) == SSA_NAME)
+ {
+ gimple def_stmt = SSA_NAME_DEF_STMT (rhs1);
+ if (is_gimple_assign (def_stmt))
+ {
+ enum tree_code def_code = gimple_assign_rhs_code (def_stmt);
+ if (def_code == PLUS_EXPR
+ || def_code == MINUS_EXPR)
+ {
+ tree def_rhs1 = gimple_assign_rhs1 (def_stmt);
+ tree def_rhs2 = gimple_assign_rhs2 (def_stmt);
+ if (operand_equal_p (def_rhs1, rhs2, 0)
+ && code == MINUS_EXPR)
+ {
+ /* (A +- B) - A -> +- B. */
+ code = ((def_code == PLUS_EXPR)
+ ? TREE_CODE (def_rhs2) : NEGATE_EXPR);
+ rhs1 = def_rhs2;
+ rhs2 = NULL_TREE;
+ gimple_assign_set_rhs_with_ops (&gsi, code, rhs1, NULL_TREE);
+ gcc_assert (gsi_stmt (gsi) == stmt);
+ gimple_set_modified (stmt, true);
+ }
+ else if (operand_equal_p (def_rhs2, rhs2, 0)
+ && code != def_code)
+ {
+ /* (A +- B) -+ B -> A. */
+ code = TREE_CODE (def_rhs1);
+ rhs1 = def_rhs1;
+ rhs2 = NULL_TREE;
+ gimple_assign_set_rhs_with_ops (&gsi, code, rhs1, NULL_TREE);
+ gcc_assert (gsi_stmt (gsi) == stmt);
+ gimple_set_modified (stmt, true);
+ }
+ else if (TREE_CODE (rhs2) == INTEGER_CST
+ && TREE_CODE (def_rhs1) == INTEGER_CST)
+ {
+ /* (CST +- A) +- CST -> CST +- A. */
+ tree cst = fold_binary (code, TREE_TYPE (rhs1),
+ def_rhs1, rhs2);
+ /* Only substitute the folded constant when folding
+ succeeded without overflow. */
+ if (cst && !TREE_OVERFLOW (cst))
+ {
+ code = def_code;
+ gimple_assign_set_rhs_code (stmt, code);
+ rhs1 = cst;
+ gimple_assign_set_rhs1 (stmt, rhs1);
+ rhs2 = def_rhs2;
+ gimple_assign_set_rhs2 (stmt, rhs2);
+ gimple_set_modified (stmt, true);
+ }
+ }
+ else if (TREE_CODE (rhs2) == INTEGER_CST
+ && TREE_CODE (def_rhs2) == INTEGER_CST
+ && def_code == PLUS_EXPR)
+ {
+ /* (A + CST) +- CST -> A + CST. */
+ tree cst = fold_binary (code, TREE_TYPE (rhs1),
+ def_rhs2, rhs2);
+ if (cst && !TREE_OVERFLOW (cst))
+ {
+ code = PLUS_EXPR;
+ gimple_assign_set_rhs_code (stmt, code);
+ rhs1 = def_rhs1;
+ gimple_assign_set_rhs1 (stmt, rhs1);
+ rhs2 = cst;
+ gimple_assign_set_rhs2 (stmt, rhs2);
+ gimple_set_modified (stmt, true);
+ }
+ }
+ }
+ else if (def_code == BIT_NOT_EXPR
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+ {
+ tree def_rhs1 = gimple_assign_rhs1 (def_stmt);
+ if (code == PLUS_EXPR
+ && operand_equal_p (def_rhs1, rhs2, 0))
+ {
+ /* ~A + A -> -1. */
+ code = INTEGER_CST;
+ rhs1 = build_int_cst (TREE_TYPE (rhs2), -1);
+ rhs2 = NULL_TREE;
+ gimple_assign_set_rhs_with_ops (&gsi, code, rhs1, NULL_TREE);
+ gcc_assert (gsi_stmt (gsi) == stmt);
+ gimple_set_modified (stmt, true);
+ }
+ else if (code == PLUS_EXPR
+ && integer_onep (rhs1))
+ {
+ /* ~A + 1 -> -A. */
+ code = NEGATE_EXPR;
+ rhs1 = def_rhs1;
+ rhs2 = NULL_TREE;
+ gimple_assign_set_rhs_with_ops (&gsi, code, rhs1, NULL_TREE);
+ gcc_assert (gsi_stmt (gsi) == stmt);
+ gimple_set_modified (stmt, true);
+ }
+ }
+ }
+ }
+
+ if (rhs2 && TREE_CODE (rhs2) == SSA_NAME)
+ {
+ gimple def_stmt = SSA_NAME_DEF_STMT (rhs2);
+ if (is_gimple_assign (def_stmt))
+ {
+ enum tree_code def_code = gimple_assign_rhs_code (def_stmt);
+ if (def_code == PLUS_EXPR
+ || def_code == MINUS_EXPR)
+ {
+ tree def_rhs1 = gimple_assign_rhs1 (def_stmt);
+ tree def_rhs2 = gimple_assign_rhs2 (def_stmt);
+ if (operand_equal_p (def_rhs1, rhs1, 0)
+ && code == MINUS_EXPR)
+ {
+ /* A - (A +- B) -> -+ B. */
+ code = ((def_code == PLUS_EXPR)
+ ? NEGATE_EXPR : TREE_CODE (def_rhs2));
+ rhs1 = def_rhs2;
+ rhs2 = NULL_TREE;
+ gimple_assign_set_rhs_with_ops (&gsi, code, rhs1, NULL_TREE);
+ gcc_assert (gsi_stmt (gsi) == stmt);
+ gimple_set_modified (stmt, true);
+ }
+ else if (operand_equal_p (def_rhs2, rhs1, 0)
+ && code != def_code)
+ {
+ /* A +- (B +- A) -> +- B. */
+ code = ((code == PLUS_EXPR)
+ ? TREE_CODE (def_rhs1) : NEGATE_EXPR);
+ rhs1 = def_rhs1;
+ rhs2 = NULL_TREE;
+ gimple_assign_set_rhs_with_ops (&gsi, code, rhs1, NULL_TREE);
+ gcc_assert (gsi_stmt (gsi) == stmt);
+ gimple_set_modified (stmt, true);
+ }
+ else if (TREE_CODE (rhs1) == INTEGER_CST
+ && TREE_CODE (def_rhs1) == INTEGER_CST)
+ {
+ /* CST +- (CST +- A) -> CST +- A. */
+ tree cst = fold_binary (code, TREE_TYPE (rhs2),
+ rhs1, def_rhs1);
+ if (cst && !TREE_OVERFLOW (cst))
+ {
+ code = (code == def_code ? PLUS_EXPR : MINUS_EXPR);
+ gimple_assign_set_rhs_code (stmt, code);
+ rhs1 = cst;
+ gimple_assign_set_rhs1 (stmt, rhs1);
+ rhs2 = def_rhs2;
+ gimple_assign_set_rhs2 (stmt, rhs2);
+ gimple_set_modified (stmt, true);
+ }
+ }
+ else if (TREE_CODE (rhs1) == INTEGER_CST
+ && TREE_CODE (def_rhs2) == INTEGER_CST)
+ {
+ /* CST +- (A +- CST) -> CST +- A. */
+ tree cst = fold_binary (def_code == code
+ ? PLUS_EXPR : MINUS_EXPR,
+ TREE_TYPE (rhs2),
+ rhs1, def_rhs2);
+ if (cst && !TREE_OVERFLOW (cst))
+ {
+ rhs1 = cst;
+ gimple_assign_set_rhs1 (stmt, rhs1);
+ rhs2 = def_rhs1;
+ gimple_assign_set_rhs2 (stmt, rhs2);
+ gimple_set_modified (stmt, true);
+ }
+ }
+ }
+ else if (def_code == BIT_NOT_EXPR
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs2)))
+ {
+ tree def_rhs1 = gimple_assign_rhs1 (def_stmt);
+ if (code == PLUS_EXPR
+ && operand_equal_p (def_rhs1, rhs1, 0))
+ {
+ /* A + ~A -> -1. */
+ code = INTEGER_CST;
+ rhs1 = build_int_cst (TREE_TYPE (rhs1), -1);
+ rhs2 = NULL_TREE;
+ gimple_assign_set_rhs_with_ops (&gsi, code, rhs1, NULL_TREE);
+ gcc_assert (gsi_stmt (gsi) == stmt);
+ gimple_set_modified (stmt, true);
+ }
+ }
+ }
+ }
+
+out:
+ /* If any transform above fired, re-fold the statement and update
+ its operand caches. */
+ if (gimple_modified_p (stmt))
+ {
+ fold_stmt_inplace (stmt);
+ update_stmt (stmt);
+ }
+}
+
/* Main entry point for the forward propagation optimizer. */
static unsigned int
simplify_bitwise_and (&gsi, stmt);
gsi_next (&gsi);
}
+ else if (gimple_assign_rhs_code (stmt) == PLUS_EXPR
+ || gimple_assign_rhs_code (stmt) == MINUS_EXPR)
+ {
+ associate_plusminus (stmt);
+ gsi_next (&gsi);
+ }
else
gsi_next (&gsi);
}
can_reassociate_p (tree op)
{
tree type = TREE_TYPE (op);
- if (INTEGRAL_TYPE_P (type)
+ if ((INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
|| NON_SAT_FIXED_POINT_TYPE_P (type)
|| (flag_associative_math && FLOAT_TYPE_P (type)))
return true;
rhs1 = gimple_assign_rhs1 (stmt);
rhs2 = gimple_assign_rhs2 (stmt);
- if (!can_reassociate_p (lhs)
- || !can_reassociate_p (rhs1)
- || !can_reassociate_p (rhs2))
+ /* For non-bit or min/max operations we can't associate
+ all types. Verify that here. */
+ if (rhs_code != BIT_IOR_EXPR
+ && rhs_code != BIT_AND_EXPR
+ && rhs_code != BIT_XOR_EXPR
+ && rhs_code != MIN_EXPR
+ && rhs_code != MAX_EXPR
+ && (!can_reassociate_p (lhs)
+ || !can_reassociate_p (rhs1)
+ || !can_reassociate_p (rhs2)))
continue;
if (associative_tree_code (rhs_code))