extern rtx_insn *split_insns (rtx, rtx_insn *);
/* In simplify-rtx.c */
+
+/* A class that records the context in which a simplification
+   is being made.  */
+class simplify_context
+{
+public:
+ rtx simplify_unary_operation (rtx_code, machine_mode, rtx, machine_mode);
+ rtx simplify_binary_operation (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_ternary_operation (rtx_code, machine_mode, machine_mode,
+ rtx, rtx, rtx);
+ rtx simplify_relational_operation (rtx_code, machine_mode, machine_mode,
+ rtx, rtx);
+ rtx simplify_subreg (machine_mode, rtx, machine_mode, poly_uint64);
+
+ rtx lowpart_subreg (machine_mode, rtx, machine_mode);
+
+ rtx simplify_merge_mask (rtx, rtx, int);
+
+ rtx simplify_gen_unary (rtx_code, machine_mode, rtx, machine_mode);
+ rtx simplify_gen_binary (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_gen_ternary (rtx_code, machine_mode, machine_mode,
+ rtx, rtx, rtx);
+ rtx simplify_gen_relational (rtx_code, machine_mode, machine_mode, rtx, rtx);
+ rtx simplify_gen_subreg (machine_mode, rtx, machine_mode, poly_uint64);
+
+ /* Tracks the level of MEM nesting for the value being simplified:
+ 0 means the value is not in a MEM, >0 means it is. This is needed
+ because the canonical representation of multiplication is different
+ inside a MEM than outside. */
+ unsigned int mem_depth = 0;
+
+private:
+ rtx simplify_truncation (machine_mode, rtx, machine_mode);
+ rtx simplify_byte_swapping_operation (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_associative_operation (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_distributive_operation (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_logical_relational_operation (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_binary_operation_series (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_distribute_over_subregs (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_shift_const_int (rtx_code, machine_mode, rtx, unsigned int);
+ rtx simplify_plus_minus (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_cond_clz_ctz (rtx, rtx_code, rtx, rtx);
+
+ rtx simplify_unary_operation_1 (rtx_code, machine_mode, rtx);
+ rtx simplify_binary_operation_1 (rtx_code, machine_mode, rtx, rtx, rtx, rtx);
+ rtx simplify_ternary_operation_1 (rtx_code, machine_mode, machine_mode,
+ rtx, rtx, rtx);
+ rtx simplify_relational_operation_1 (rtx_code, machine_mode, machine_mode,
+ rtx, rtx);
+};
+
+inline rtx
+simplify_unary_operation (rtx_code code, machine_mode mode, rtx op,
+ machine_mode op_mode)
+{
+ return simplify_context ().simplify_unary_operation (code, mode, op,
+ op_mode);
+}
+
+inline rtx
+simplify_binary_operation (rtx_code code, machine_mode mode, rtx op0, rtx op1)
+{
+ return simplify_context ().simplify_binary_operation (code, mode, op0, op1);
+}
+
+inline rtx
+simplify_ternary_operation (rtx_code code, machine_mode mode,
+ machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
+{
+ return simplify_context ().simplify_ternary_operation (code, mode, op0_mode,
+ op0, op1, op2);
+}
+
+inline rtx
+simplify_relational_operation (rtx_code code, machine_mode mode,
+ machine_mode op_mode, rtx op0, rtx op1)
+{
+ return simplify_context ().simplify_relational_operation (code, mode,
+ op_mode, op0, op1);
+}
+
+inline rtx
+simplify_subreg (machine_mode outermode, rtx op, machine_mode innermode,
+ poly_uint64 byte)
+{
+ return simplify_context ().simplify_subreg (outermode, op, innermode, byte);
+}
+
+inline rtx
+simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
+ machine_mode op_mode)
+{
+ return simplify_context ().simplify_gen_unary (code, mode, op, op_mode);
+}
+
+inline rtx
+simplify_gen_binary (rtx_code code, machine_mode mode, rtx op0, rtx op1)
+{
+ return simplify_context ().simplify_gen_binary (code, mode, op0, op1);
+}
+
+inline rtx
+simplify_gen_ternary (rtx_code code, machine_mode mode, machine_mode op0_mode,
+ rtx op0, rtx op1, rtx op2)
+{
+ return simplify_context ().simplify_gen_ternary (code, mode, op0_mode,
+ op0, op1, op2);
+}
+
+inline rtx
+simplify_gen_relational (rtx_code code, machine_mode mode,
+ machine_mode op_mode, rtx op0, rtx op1)
+{
+ return simplify_context ().simplify_gen_relational (code, mode, op_mode,
+ op0, op1);
+}
+
+inline rtx
+simplify_gen_subreg (machine_mode outermode, rtx op, machine_mode innermode,
+ poly_uint64 byte)
+{
+ return simplify_context ().simplify_gen_subreg (outermode, op,
+ innermode, byte);
+}
+
+inline rtx
+lowpart_subreg (machine_mode outermode, rtx op, machine_mode innermode)
+{
+ return simplify_context ().lowpart_subreg (outermode, op, innermode);
+}
+
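The inline forwarders above keep the existing free-function interface working: each one builds a temporary default context for a single call. As a minimal sketch of why the context matters (illustrative only, not part of the patch), the hypothetical helper below simplifies the same scaling once through the free function and once on a local context with mem_depth set, as a caller building an address would. It assumes the usual GCC internal includes (config.h, system.h, coretypes.h, rtl.h) and an initialized target; the helper name and the DImode pseudo are made up for the example.

static void
example_canonical_forms (void)
{
  /* A fresh DImode pseudo to scale by 8.  */
  rtx reg = gen_reg_rtx (DImode);

  /* With the default context (mem_depth == 0), multiplication by a
     power of two is expected to be canonicalized as a shift:
     (mult reg 8) -> (ashift reg 3).  */
  rtx outside = simplify_gen_binary (MULT, DImode, reg, GEN_INT (8));

  /* A caller simplifying a value that will sit inside a MEM records
     that fact on its own context; the shift is then expected to keep
     the MULT form that is canonical inside addresses:
     (ashift reg 3) -> (mult reg 8).  */
  simplify_context ctx;
  ctx.mem_depth = 1;
  rtx inside = ctx.simplify_gen_binary (ASHIFT, DImode, reg, GEN_INT (3));

  /* Dump both forms for inspection.  */
  debug_rtx (outside);
  debug_rtx (inside);
}

Recording the MEM nesting on the context object, rather than threading an extra parameter through every simplify_* routine, is what lets the recursive helpers converted below see the nesting level without any signature changes.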
extern rtx simplify_const_unary_operation (enum rtx_code, machine_mode,
rtx, machine_mode);
-extern rtx simplify_unary_operation (enum rtx_code, machine_mode, rtx,
- machine_mode);
extern rtx simplify_const_binary_operation (enum rtx_code, machine_mode,
rtx, rtx);
-extern rtx simplify_binary_operation (enum rtx_code, machine_mode, rtx,
- rtx);
-extern rtx simplify_ternary_operation (enum rtx_code, machine_mode,
- machine_mode, rtx, rtx, rtx);
extern rtx simplify_const_relational_operation (enum rtx_code,
machine_mode, rtx, rtx);
-extern rtx simplify_relational_operation (enum rtx_code, machine_mode,
- machine_mode, rtx, rtx);
-extern rtx simplify_gen_binary (enum rtx_code, machine_mode, rtx, rtx);
-extern rtx simplify_gen_unary (enum rtx_code, machine_mode, rtx,
- machine_mode);
-extern rtx simplify_gen_ternary (enum rtx_code, machine_mode,
- machine_mode, rtx, rtx, rtx);
-extern rtx simplify_gen_relational (enum rtx_code, machine_mode,
- machine_mode, rtx, rtx);
-extern rtx simplify_subreg (machine_mode, rtx, machine_mode, poly_uint64);
-extern rtx simplify_gen_subreg (machine_mode, rtx, machine_mode, poly_uint64);
-extern rtx lowpart_subreg (machine_mode, rtx, machine_mode);
extern rtx simplify_replace_fn_rtx (rtx, const_rtx,
rtx (*fn) (rtx, const_rtx, void *), void *);
extern rtx simplify_replace_rtx (rtx, const_rtx, rtx);
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
static bool plus_minus_operand_p (const_rtx);
-static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
-static rtx simplify_associative_operation (enum rtx_code, machine_mode,
- rtx, rtx);
-static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
- machine_mode, rtx, rtx);
-static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
-static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
- rtx, rtx, rtx, rtx);
\f
/* Negate I, which satisfies poly_int_rtx_p. MODE is the mode of I. */
seeing if the expression folds. */
rtx
-simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
- rtx op1)
+simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode,
+ rtx op0, rtx op1)
{
rtx tem;
the specified operation. */
rtx
-simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
- machine_mode op_mode)
+simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
+ machine_mode op_mode)
{
rtx tem;
/* Likewise for ternary operations. */
rtx
-simplify_gen_ternary (enum rtx_code code, machine_mode mode,
- machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
+simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode,
+ machine_mode op0_mode,
+ rtx op0, rtx op1, rtx op2)
{
rtx tem;
CMP_MODE specifies mode comparison is done in. */
rtx
-simplify_gen_relational (enum rtx_code code, machine_mode mode,
- machine_mode cmp_mode, rtx op0, rtx op1)
+simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode,
+ machine_mode cmp_mode,
+ rtx op0, rtx op1)
{
rtx tem;
However, X is still an arbitrary 64-bit number and so we cannot
assume that truncating it too is a no-op. */
-static rtx
-simplify_truncation (machine_mode mode, rtx op,
- machine_mode op_mode)
+rtx
+simplify_context::simplify_truncation (machine_mode mode, rtx op,
+ machine_mode op_mode)
{
unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
MODE with input operand OP whose mode was originally OP_MODE.
Return zero if no simplification can be made. */
rtx
-simplify_unary_operation (enum rtx_code code, machine_mode mode,
- rtx op, machine_mode op_mode)
+simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode,
+ rtx op, machine_mode op_mode)
{
rtx trueop, tem;
/* Perform some simplifications we can do even if the operands
aren't constant. */
-static rtx
-simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
+rtx
+simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode,
+ rtx op)
{
enum rtx_code reversed;
rtx temp, elt, base, step;
operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
Return zero if no simplification or canonicalization is possible. */
-static rtx
-simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
- rtx op0, rtx op1)
+rtx
+simplify_context::simplify_byte_swapping_operation (rtx_code code,
+ machine_mode mode,
+ rtx op0, rtx op1)
{
rtx tem;
SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
canonicalization is possible. */
-static rtx
-simplify_associative_operation (enum rtx_code code, machine_mode mode,
- rtx op0, rtx op1)
+rtx
+simplify_context::simplify_associative_operation (rtx_code code,
+ machine_mode mode,
+ rtx op0, rtx op1)
{
rtx tem;
and OP1, which should be both relational operations. Return 0 if no such
simplification is possible. */
rtx
-simplify_logical_relational_operation (enum rtx_code code, machine_mode mode,
- rtx op0, rtx op1)
+simplify_context::simplify_logical_relational_operation (rtx_code code,
+ machine_mode mode,
+ rtx op0, rtx op1)
{
/* We only handle IOR of two relational operations. */
if (code != IOR)
Don't use this for relational operations such as EQ or LT.
Use simplify_relational_operation instead. */
rtx
-simplify_binary_operation (enum rtx_code code, machine_mode mode,
- rtx op0, rtx op1)
+simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode,
+ rtx op0, rtx op1)
{
rtx trueop0, trueop1;
rtx tem;
MODE is the mode of the operation and is known to be a vector
integer mode. */
-static rtx
-simplify_binary_operation_series (rtx_code code, machine_mode mode,
- rtx op0, rtx op1)
+rtx
+simplify_context::simplify_binary_operation_series (rtx_code code,
+ machine_mode mode,
+ rtx op0, rtx op1)
{
rtx base0, step0;
if (vec_duplicate_p (op0, &base0))
   e.g. simplify (xor (and A C) (and B C)) to (and (xor A B) C).
Returns NULL_RTX if no simplification is possible. */
-static rtx
-simplify_distributive_operation (enum rtx_code code, machine_mode mode,
- rtx op0, rtx op1)
+rtx
+simplify_context::simplify_distributive_operation (rtx_code code,
+ machine_mode mode,
+ rtx op0, rtx op1)
{
enum rtx_code op = GET_CODE (op0);
gcc_assert (GET_CODE (op1) == op);
OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
actual constants. */
-static rtx
-simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
- rtx op0, rtx op1, rtx trueop0, rtx trueop1)
+rtx
+simplify_context::simplify_binary_operation_1 (rtx_code code,
+ machine_mode mode,
+ rtx op0, rtx op1,
+ rtx trueop0, rtx trueop1)
{
rtx tem, reversed, opleft, opright, elt0, elt1;
HOST_WIDE_INT val;
return op0;
/* Convert multiply by constant power of two into shift. */
- if (CONST_SCALAR_INT_P (trueop1))
+ if (mem_depth == 0 && CONST_SCALAR_INT_P (trueop1))
{
val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
if (val >= 0)
return op0;
if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
return op0;
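+ /* Inside a MEM, the canonical form of a left shift by a constant
+    is a multiplication, so e.g. (ashift X 3) used as part of an
+    address is rewritten here as (mult X 8).  */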
+ if (mem_depth
+ && code == ASHIFT
+ && CONST_INT_P (trueop1)
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && IN_RANGE (UINTVAL (trueop1),
+ 1, GET_MODE_PRECISION (int_mode) - 1))
+ {
+ auto c = (wi::one (GET_MODE_PRECISION (int_mode))
+ << UINTVAL (trueop1));
+ rtx new_op1 = immed_wide_int_const (c, int_mode);
+ return simplify_gen_binary (MULT, int_mode, op0, new_op1);
+ }
goto canonicalize_shift;
case LSHIFTRT:
May return NULL_RTX when no changes were made. */
-static rtx
-simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
- rtx op1)
+rtx
+simplify_context::simplify_plus_minus (rtx_code code, machine_mode mode,
+ rtx op0, rtx op1)
{
struct simplify_plus_minus_op_data
{
the operands or, if both are VOIDmode, the operands are compared in
"infinite precision". */
rtx
-simplify_relational_operation (enum rtx_code code, machine_mode mode,
- machine_mode cmp_mode, rtx op0, rtx op1)
+simplify_context::simplify_relational_operation (rtx_code code,
+ machine_mode mode,
+ machine_mode cmp_mode,
+ rtx op0, rtx op1)
{
rtx tem, trueop0, trueop1;
MODE is the mode of the result, while CMP_MODE specifies in which
mode the comparison is done in, so it is the mode of the operands. */
-static rtx
-simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
- machine_mode cmp_mode, rtx op0, rtx op1)
+rtx
+simplify_context::simplify_relational_operation_1 (rtx_code code,
+ machine_mode mode,
+ machine_mode cmp_mode,
+ rtx op0, rtx op1)
{
enum rtx_code op0code = GET_CODE (op0);
Assume X is compared against zero with CMP_CODE and the true
arm is TRUE_VAL and the false arm is FALSE_VAL. */
-static rtx
-simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
+rtx
+simplify_context::simplify_cond_clz_ctz (rtx x, rtx_code cmp_code,
+ rtx true_val, rtx false_val)
{
if (cmp_code != EQ && cmp_code != NE)
return NULL_RTX;
Return the simplified X on success, otherwise return NULL_RTX. */
rtx
-simplify_merge_mask (rtx x, rtx mask, int op)
+simplify_context::simplify_merge_mask (rtx x, rtx mask, int op)
{
gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
a constant. Return 0 if no simplifications is possible. */
rtx
-simplify_ternary_operation (enum rtx_code code, machine_mode mode,
- machine_mode op0_mode, rtx op0, rtx op1,
- rtx op2)
+simplify_context::simplify_ternary_operation (rtx_code code, machine_mode mode,
+ machine_mode op0_mode,
+ rtx op0, rtx op1, rtx op2)
{
bool any_change = false;
rtx tem, trueop2;
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
Return 0 if no simplifications are possible. */
rtx
-simplify_subreg (machine_mode outermode, rtx op,
- machine_mode innermode, poly_uint64 byte)
+simplify_context::simplify_subreg (machine_mode outermode, rtx op,
+ machine_mode innermode, poly_uint64 byte)
{
/* Little bit of sanity checking. */
gcc_assert (innermode != VOIDmode);
/* Make a SUBREG operation or equivalent if it folds. */
rtx
-simplify_gen_subreg (machine_mode outermode, rtx op,
- machine_mode innermode, poly_uint64 byte)
+simplify_context::simplify_gen_subreg (machine_mode outermode, rtx op,
+ machine_mode innermode,
+ poly_uint64 byte)
{
rtx newx;
INNER_MODE) to OUTER_MODE. */
rtx
-lowpart_subreg (machine_mode outer_mode, rtx expr,
- machine_mode inner_mode)
+simplify_context::lowpart_subreg (machine_mode outer_mode, rtx expr,
+ machine_mode inner_mode)
{
return simplify_gen_subreg (outer_mode, expr, inner_mode,
subreg_lowpart_offset (outer_mode, inner_mode));
series_0_1));
}
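+
+/* Forwarding wrapper so that the selftests below can keep calling
+   simplify_merge_mask as a free function.  */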
+static rtx
+simplify_merge_mask (rtx x, rtx mask, int op)
+{
+ return simplify_context ().simplify_merge_mask (x, mask, op);
+}
+
/* Verify simplify_merge_mask works correctly. */
static void