  EXPECT_EQ(1U, s[0]->OutputCount());
}
+
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
                        ::testing::ValuesIn(kMulDivInstructions));
+namespace {
+
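+// Description of a multiply-accumulate selection test case: the
+// RawMachineAssembler constructors used to build the mul/add/sub graph, and
+// the ARM64 opcodes expected for the combined madd, msub and mneg patterns.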
+struct MulDPInst {
+  const char* mul_constructor_name;
+  Node* (RawMachineAssembler::*mul_constructor)(Node*, Node*);
+  Node* (RawMachineAssembler::*add_constructor)(Node*, Node*);
+  Node* (RawMachineAssembler::*sub_constructor)(Node*, Node*);
+  ArchOpcode add_arch_opcode;
+  ArchOpcode sub_arch_opcode;
+  ArchOpcode neg_arch_opcode;
+  MachineType machine_type;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MulDPInst& inst) {
+  return os << inst.mul_constructor_name;
+}
+
+} // namespace
+
+
+static const MulDPInst kMulDPInstructions[] = {
+    {"Int32Mul", &RawMachineAssembler::Int32Mul, &RawMachineAssembler::Int32Add,
+     &RawMachineAssembler::Int32Sub, kArm64Madd32, kArm64Msub32, kArm64Mneg32,
+     kMachInt32},
+    {"Int64Mul", &RawMachineAssembler::Int64Mul, &RawMachineAssembler::Int64Add,
+     &RawMachineAssembler::Int64Sub, kArm64Madd, kArm64Msub, kArm64Mneg,
+     kMachInt64}};
+
+
+typedef InstructionSelectorTestWithParam<MulDPInst>
+    InstructionSelectorIntDPWithIntMulTest;
+
+
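+// Add(Mul(x, y), z) and Add(z, Mul(x, y)) should each be selected as a single
+// multiply-add instruction with three register inputs.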
+TEST_P(InstructionSelectorIntDPWithIntMulTest, AddWithMul) {
+  const MulDPInst mdpi = GetParam();
+  const MachineType type = mdpi.machine_type;
+  {
+    StreamBuilder m(this, type, type, type, type);
+    Node* n = (m.*mdpi.mul_constructor)(m.Parameter(1), m.Parameter(2));
+    m.Return((m.*mdpi.add_constructor)(m.Parameter(0), n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(mdpi.add_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    StreamBuilder m(this, type, type, type, type);
+    Node* n = (m.*mdpi.mul_constructor)(m.Parameter(0), m.Parameter(1));
+    m.Return((m.*mdpi.add_constructor)(n, m.Parameter(2)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(mdpi.add_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
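+// Sub(a, Mul(x, y)) should be selected as a single multiply-subtract
+// instruction. Note that Sub(Mul(x, y), a) cannot be combined, because msub
+// computes a - x * y rather than x * y - a.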
+TEST_P(InstructionSelectorIntDPWithIntMulTest, SubWithMul) {
+  const MulDPInst mdpi = GetParam();
+  const MachineType type = mdpi.machine_type;
+  {
+    StreamBuilder m(this, type, type, type, type);
+    Node* n = (m.*mdpi.mul_constructor)(m.Parameter(1), m.Parameter(2));
+    m.Return((m.*mdpi.sub_constructor)(m.Parameter(0), n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(mdpi.sub_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
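+// Mul(Sub(0, x), y) and Mul(x, Sub(0, y)) should each be selected as a single
+// multiply-negate instruction computing -(x * y).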
+TEST_P(InstructionSelectorIntDPWithIntMulTest, NegativeMul) {
+  const MulDPInst mdpi = GetParam();
+  const MachineType type = mdpi.machine_type;
+  {
+    StreamBuilder m(this, type, type, type);
+    Node* n =
+        (m.*mdpi.sub_constructor)(BuildConstant(m, type, 0), m.Parameter(0));
+    m.Return((m.*mdpi.mul_constructor)(n, m.Parameter(1)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(mdpi.neg_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    StreamBuilder m(this, type, type, type);
+    Node* n =
+        (m.*mdpi.sub_constructor)(BuildConstant(m, type, 0), m.Parameter(1));
+    m.Return((m.*mdpi.mul_constructor)(m.Parameter(0), n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(mdpi.neg_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorIntDPWithIntMulTest,
+                        ::testing::ValuesIn(kMulDPInstructions));
+
+
// -----------------------------------------------------------------------------
// Floating point instructions.
void InstructionSelector::VisitInt32Add(Node* node) {
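+  // On ARM64, madd computes Ra + Rn * Rm in a single instruction, so an add
+  // over a multiply can be combined whenever CanCover shows the multiply has
+  // no other users.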
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  // Select Madd(x, y, z) for Add(Mul(x, y), z).
+  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    Emit(kArm64Madd32, g.DefineAsRegister(node),
+         g.UseRegister(mleft.left().node()),
+         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
+    return;
+  }
+  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
+  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArm64Madd32, g.DefineAsRegister(node),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm);
}
void InstructionSelector::VisitInt64Add(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  // Select Madd(x, y, z) for Add(Mul(x, y), z).
+  if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
+    Int64BinopMatcher mleft(m.left().node());
+    Emit(kArm64Madd, g.DefineAsRegister(node),
+         g.UseRegister(mleft.left().node()),
+         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
+    return;
+  }
+  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
+  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
+    Int64BinopMatcher mright(m.right().node());
+    Emit(kArm64Madd, g.DefineAsRegister(node),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm);
}
void InstructionSelector::VisitInt32Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
+
+  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
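+  // (Msub computes a - x * y, so the mirrored Sub(Mul(x, y), a) cannot be
+  // combined this way.)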
+  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArm64Msub32, g.DefineAsRegister(node),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+
  if (m.left().Is(0)) {
    Emit(kArm64Neg32, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
void InstructionSelector::VisitInt64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
+
+  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
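+  // (As in the 32-bit case, only the Sub(a, Mul(x, y)) pattern can use msub.)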
+  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
+    Int64BinopMatcher mright(m.right().node());
+    Emit(kArm64Msub, g.DefineAsRegister(node),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+
  if (m.left().Is(0)) {
    Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
  } else {
void InstructionSelector::VisitInt32Mul(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+
+  if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+
+    // Select Mneg(x, y) for Mul(Sub(0, x), y).
+    if (mleft.left().Is(0)) {
+      Emit(kArm64Mneg32, g.DefineAsRegister(node),
+           g.UseRegister(mleft.right().node()),
+           g.UseRegister(m.right().node()));
+      return;
+    }
+  }
+
+  if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+
+    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
+    if (mright.left().Is(0)) {
+      Emit(kArm64Mneg32, g.DefineAsRegister(node),
+           g.UseRegister(m.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    }
+  }
+
  VisitRRR(this, kArm64Mul32, node);
}
void InstructionSelector::VisitInt64Mul(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+
+  if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
+    Int64BinopMatcher mleft(m.left().node());
+
+    // Select Mneg(x, y) for Mul(Sub(0, x), y).
+    if (mleft.left().Is(0)) {
+      Emit(kArm64Mneg, g.DefineAsRegister(node),
+           g.UseRegister(mleft.right().node()),
+           g.UseRegister(m.right().node()));
+      return;
+    }
+  }
+
+  if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
+    Int64BinopMatcher mright(m.right().node());
+
+    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
+    if (mright.left().Is(0)) {
+      Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    }
+  }
+
  VisitRRR(this, kArm64Mul, node);
}