+2015-04-30  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
+
+	* config/aarch64/aarch64.c (aarch64_rtx_mult_cost): Handle MNEG
+	and [SU]MNEGL patterns.
+
 2015-04-30  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
 
* config/aarch64/aarch64.c (aarch64_shift_p): New function.
(aarch64_rtx_mult_cost): Update comment to reflect that it also handles
combined arithmetic-shift ops. Properly handle all shift and extend
return cost;
}
+ /* MNEG or [US]MNEGL. Extract the NEG operand and indicate that it's a
+ compound and let the below cases handle it. After all, MNEG is a
+ special-case alias of MSUB. */
+ if (GET_CODE (op0) == NEG)
+ {
+ op0 = XEXP (op0, 0);
+ compound_p = true;
+ }
+
/* Integer multiplies or FMAs have zero/sign extending variants. */
if ((GET_CODE (op0) == ZERO_EXTEND
&& GET_CODE (op1) == ZERO_EXTEND)
if (speed)
{
if (compound_p)
- /* MADD/SMADDL/UMADDL. */
+ /* SMADDL/UMADDL/UMSUBL/SMSUBL. */
cost += extra_cost->mult[0].extend_add;
else
/* MUL/SMULL/UMULL. */
return cost;
}
- /* This is either an integer multiply or an FMA. In both cases
+ /* This is either an integer multiply or a MADD. In both cases
we want to recurse and cost the operands. */
cost += rtx_cost (op0, MULT, 0, speed)
+ rtx_cost (op1, MULT, 1, speed);
if (speed)
{
if (compound_p)
- /* MADD. */
+ /* MADD/MSUB. */
cost += extra_cost->mult[mode == DImode].add;
else
/* MUL. */