}
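+// Tries to lower an Int32Add of a scaled value and a constant into a single
+// lea; returns false if the operands do not match the pattern.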
+static bool TryEmitLeaMultAdd(InstructionSelector* selector, Node* node) {
+ Int32BinopMatcher m(node);
+ if (!m.right().HasValue()) return false;
+ int32_t displacement_value = m.right().Value();
+ Node* left = m.left().node();
+ LeaMultiplyMatcher lmm(left);
+ if (!lmm.Matches()) return false;
+ AddressingMode mode;
+ size_t input_count;
+ IA32OperandGenerator g(selector);
+ InstructionOperand* index = g.UseRegister(lmm.Left());
+ InstructionOperand* displacement = g.TempImmediate(displacement_value);
+ InstructionOperand* inputs[] = {index, displacement, displacement};
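+ // Multipliers of the form 2^n + 1 (3, 5, 9) need the register both as base
+ // and as scaled index; plain powers of two only need the scaled index plus
+ // the immediate.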
+ if (lmm.Displacement() != 0) {
+ input_count = 3;
+ inputs[1] = index;
+ mode = kMode_MR1I;
+ } else {
+ input_count = 2;
+ mode = kMode_M1I;
+ }
+ mode = AdjustAddressingMode(mode, lmm.Power());
+ InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
+ selector->Emit(kIA32Lea | AddressingModeField::encode(mode), 1, outputs,
+ input_count, inputs);
+ return true;
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
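+ // Prefer a single lea when the add folds a scaled multiply and a constant.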
+ if (TryEmitLeaMultAdd(this, node)) return;
VisitBinop(this, node, kIA32Add);
}
}
-void InstructionSelector::VisitInt32Mul(Node* node) {
- IA32OperandGenerator g(this);
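+// Tries to lower a multiplication by a lea-compatible scale (a power of two
+// up to 8, optionally plus the input itself) into a single lea.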
+static bool TryEmitLeaMult(InstructionSelector* selector, Node* node) {
LeaMultiplyMatcher lea(node);
// Try to match lea.
- if (lea.Matches()) {
- ArchOpcode opcode = kIA32Lea;
- AddressingMode mode;
- size_t input_count;
- InstructionOperand* left = g.UseRegister(lea.Left());
- InstructionOperand* inputs[] = {left, left};
- if (lea.Displacement() != 0) {
- input_count = 2;
- mode = kMode_MR1;
- } else {
- input_count = 1;
- mode = kMode_M1;
- }
- mode = AdjustAddressingMode(mode, lea.Power());
- InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
- Emit(opcode | AddressingModeField::encode(mode), 1, outputs, input_count,
- inputs);
+ if (!lea.Matches()) return false;
+ AddressingMode mode;
+ size_t input_count;
+ IA32OperandGenerator g(selector);
+ InstructionOperand* left = g.UseRegister(lea.Left());
+ InstructionOperand* inputs[] = {left, left};
+ if (lea.Displacement() != 0) {
+ input_count = 2;
+ mode = kMode_MR1;
} else {
- Int32BinopMatcher m(node);
- Node* left = m.left().node();
- Node* right = m.right().node();
- if (g.CanBeImmediate(right)) {
- Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
- g.UseImmediate(right));
- } else {
- if (g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
- }
- Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.Use(right));
+ input_count = 1;
+ mode = kMode_M1;
+ }
+ mode = AdjustAddressingMode(mode, lea.Power());
+ InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
+ selector->Emit(kIA32Lea | AddressingModeField::encode(mode), 1, outputs,
+ input_count, inputs);
+ return true;
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ if (TryEmitLeaMult(this, node)) return;
+ IA32OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
+ if (g.CanBeImmediate(right)) {
+ Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
+ g.UseImmediate(right));
+ } else {
+ if (g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
}
+ Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.Use(right));
}
}
}
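+// Tries to lower a 32- or 64-bit add of a scaled value and a constant into a
+// single lea; opcode selects kX64Lea32 or kX64Lea.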
+static bool TryEmitLeaMultAdd(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ int32_t displacement_value;
+ Node* left;
+ {
+ Int32BinopMatcher m32(node);
+ left = m32.left().node();
+ if (m32.right().HasValue()) {
+ displacement_value = m32.right().Value();
+ } else {
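+ // For 64-bit adds the constant must also fit into lea's 32-bit displacement.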
+ Int64BinopMatcher m64(node);
+ if (!m64.right().HasValue()) {
+ return false;
+ }
+ int64_t value_64 = m64.right().Value();
+ displacement_value = static_cast<int32_t>(value_64);
+ if (displacement_value != value_64) return false;
+ }
+ }
+ LeaMultiplyMatcher lmm(left);
+ if (!lmm.Matches()) return false;
+ AddressingMode mode;
+ size_t input_count;
+ X64OperandGenerator g(selector);
+ InstructionOperand* index = g.UseRegister(lmm.Left());
+ InstructionOperand* displacement = g.TempImmediate(displacement_value);
+ InstructionOperand* inputs[] = {index, displacement, displacement};
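+ // Reuse the register as base when the multiplier has the form 2^n + 1.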
+ if (lmm.Displacement() != 0) {
+ input_count = 3;
+ inputs[1] = index;
+ mode = kMode_MR1I;
+ } else {
+ input_count = 2;
+ mode = kMode_M1I;
+ }
+ mode = AdjustAddressingMode(mode, lmm.Power());
+ InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
+ selector->Emit(opcode | AddressingModeField::encode(mode), 1, outputs,
+ input_count, inputs);
+ return true;
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
+ if (TryEmitLeaMultAdd(this, node, kX64Lea32)) return;
VisitBinop(this, node, kX64Add32);
}
void InstructionSelector::VisitInt64Add(Node* node) {
+ if (TryEmitLeaMultAdd(this, node, kX64Lea)) return;
VisitBinop(this, node, kX64Add);
}
}
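+// Tries to lower a 32- or 64-bit multiplication by a lea-compatible scale
+// into a single lea; opcode selects kX64Lea32 or kX64Lea.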
+static bool TryEmitLeaMult(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ LeaMultiplyMatcher lea(node);
+ // Try to match lea.
+ if (!lea.Matches()) return false;
+ AddressingMode mode;
+ size_t input_count;
+ X64OperandGenerator g(selector);
+ InstructionOperand* left = g.UseRegister(lea.Left());
+ InstructionOperand* inputs[] = {left, left};
+ if (lea.Displacement() != 0) {
+ input_count = 2;
+ mode = kMode_MR1;
+ } else {
+ input_count = 1;
+ mode = kMode_M1;
+ }
+ mode = AdjustAddressingMode(mode, lea.Power());
+ InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
+ selector->Emit(opcode | AddressingModeField::encode(mode), 1, outputs,
+ input_count, inputs);
+ return true;
+}
+
+
static void VisitMul(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
X64OperandGenerator g(selector);
- LeaMultiplyMatcher lea(node);
- // Try to match lea.
- if (lea.Matches()) {
- switch (opcode) {
- case kX64Imul32:
- opcode = kX64Lea32;
- break;
- case kX64Imul:
- opcode = kX64Lea;
- break;
- default:
- UNREACHABLE();
- }
- AddressingMode mode;
- size_t input_count;
- InstructionOperand* left = g.UseRegister(lea.Left());
- InstructionOperand* inputs[] = {left, left};
- if (lea.Displacement() != 0) {
- input_count = 2;
- mode = kMode_MR1;
- } else {
- input_count = 1;
- mode = kMode_M1;
- }
- mode = AdjustAddressingMode(mode, lea.Power());
- InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
- selector->Emit(opcode | AddressingModeField::encode(mode), 1, outputs,
- input_count, inputs);
+ Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
+ if (g.CanBeImmediate(right)) {
+ selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
+ g.UseImmediate(right));
} else {
- Int32BinopMatcher m(node);
- Node* left = m.left().node();
- Node* right = m.right().node();
- if (g.CanBeImmediate(right)) {
- selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
- g.UseImmediate(right));
- } else {
- if (g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
- }
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.Use(right));
+ if (g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
}
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.Use(right));
}
}
void InstructionSelector::VisitInt32Mul(Node* node) {
+ if (TryEmitLeaMult(this, node, kX64Lea32)) return;
VisitMul(this, node, kX64Imul32);
}
void InstructionSelector::VisitInt64Mul(Node* node) {
+ if (TryEmitLeaMult(this, node, kX64Lea)) return;
VisitMul(this, node, kX64Imul);
}
TEST(RunInt32MulAndInt32AddP) {
{
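+ // constant + parameter * constant: exercises the mult-add lea lowering.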
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ int32_t p0 = *i;
+ int32_t p1 = *j;
+ m.Return(m.Int32Add(m.Int32Constant(p0),
+ m.Int32Mul(m.Parameter(0), m.Int32Constant(p1))));
+ FOR_INT32_INPUTS(k) {
+ int32_t p2 = *k;
+ int expected = p0 + static_cast<int32_t>(p1 * p2);
+ CHECK_EQ(expected, m.Call(p2));
+ }
+ }
+ }
+ }
+ {
RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
m.Return(
m.Int32Add(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
static const std::vector<uint32_t> uint32_vector() {
static const uint32_t kValues[] = {
0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+ // This row is useful for testing lea optimizations on Intel.
+ 0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000008, 0x00000009,
0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
static unsigned InputCountForLea(AddressingMode mode) {
switch (mode) {
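+ // Lea modes that fold in an immediate displacement take one extra input.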
+ case kMode_MR1I:
+ case kMode_MR2I:
+ case kMode_MR4I:
+ case kMode_MR8I:
+ return 3U;
+ case kMode_M1I:
+ case kMode_M2I:
+ case kMode_M4I:
+ case kMode_M8I:
+ return 2U;
case kMode_MR1:
case kMode_MR2:
case kMode_MR4:
}
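+// Maps the addressing mode expected for a bare scaled multiply to the
+// immediate-carrying mode expected once a constant add is folded in.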
+static AddressingMode AddressingModeForAddMult(const MultParam& m) {
+ switch (m.addressing_mode) {
+ case kMode_MR1:
+ return kMode_MR1I;
+ case kMode_MR2:
+ return kMode_MR2I;
+ case kMode_MR4:
+ return kMode_MR4I;
+ case kMode_MR8:
+ return kMode_MR8I;
+ case kMode_M1:
+ return kMode_M1I;
+ case kMode_M2:
+ return kMode_M2I;
+ case kMode_M4:
+ return kMode_M4I;
+ case kMode_M8:
+ return kMode_M8I;
+ default:
+ UNREACHABLE();
+ return kMode_None;
+ }
+}
+
+
TEST_P(InstructionSelectorMultTest, Mult32) {
const MultParam m_param = GetParam();
StreamBuilder m(this, kMachInt32, kMachInt32);
}
+TEST_P(InstructionSelectorMultTest, MultAdd32) {
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ const MultParam m_param = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* param = m.Parameter(0);
+ Node* mult = m.Int32Add(m.Int32Mul(param, m.Int32Constant(m_param.value)),
+ m.Int32Constant(imm));
+ m.Return(mult);
+ Stream s = m.Build();
+ if (m_param.lea_expected) {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
+ EXPECT_EQ(AddressingModeForAddMult(m_param), s[0]->addressing_mode());
+ unsigned input_count = InputCountForLea(s[0]->addressing_mode());
+ ASSERT_EQ(input_count, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE,
+ s[0]->InputAt(input_count - 1)->kind());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
+ } else {
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
+ EXPECT_EQ(kIA32Add, s[1]->arch_opcode());
+ }
+ }
+}
+
+
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMultTest,
::testing::ValuesIn(kMultParams));
namespace internal {
namespace compiler {
+namespace {
+
+// Immediates (hand-picked subset).
+static const int32_t kImmediates[] = {
+ kMinInt, -42, -1, 0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
+
+} // namespace
+
+
// -----------------------------------------------------------------------------
// Conversions.
static unsigned InputCountForLea(AddressingMode mode) {
switch (mode) {
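+ // Lea modes that fold in an immediate displacement take one extra input.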
+ case kMode_MR1I:
+ case kMode_MR2I:
+ case kMode_MR4I:
+ case kMode_MR8I:
+ return 3U;
+ case kMode_M1I:
+ case kMode_M2I:
+ case kMode_M4I:
+ case kMode_M8I:
+ return 2U;
case kMode_MR1:
case kMode_MR2:
case kMode_MR4:
}
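+// Maps the addressing mode expected for a bare scaled multiply to the
+// immediate-carrying mode expected once a constant add is folded in.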
+static AddressingMode AddressingModeForAddMult(const MultParam& m) {
+ switch (m.addressing_mode) {
+ case kMode_MR1:
+ return kMode_MR1I;
+ case kMode_MR2:
+ return kMode_MR2I;
+ case kMode_MR4:
+ return kMode_MR4I;
+ case kMode_MR8:
+ return kMode_MR8I;
+ case kMode_M1:
+ return kMode_M1I;
+ case kMode_M2:
+ return kMode_M2I;
+ case kMode_M4:
+ return kMode_M4I;
+ case kMode_M8:
+ return kMode_M8I;
+ default:
+ UNREACHABLE();
+ return kMode_None;
+ }
+}
+
+
TEST_P(InstructionSelectorMultTest, Mult32) {
const MultParam m_param = GetParam();
StreamBuilder m(this, kMachInt32, kMachInt32);
}
+TEST_P(InstructionSelectorMultTest, MultAdd32) {
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ const MultParam m_param = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* param = m.Parameter(0);
+ Node* mult = m.Int32Add(m.Int32Mul(param, m.Int32Constant(m_param.value)),
+ m.Int32Constant(imm));
+ m.Return(mult);
+ Stream s = m.Build();
+ if (m_param.lea_expected) {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(AddressingModeForAddMult(m_param), s[0]->addressing_mode());
+ unsigned input_count = InputCountForLea(s[0]->addressing_mode());
+ ASSERT_EQ(input_count, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE,
+ s[0]->InputAt(input_count - 1)->kind());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
+ } else {
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kX64Imul32, s[0]->arch_opcode());
+ EXPECT_EQ(kX64Add32, s[1]->arch_opcode());
+ }
+ }
+}
+
+
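+// 64-bit variant of MultAdd32; every value in kImmediates fits into lea's
+// 32-bit displacement.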
+TEST_P(InstructionSelectorMultTest, MultAdd64) {
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ const MultParam m_param = GetParam();
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ Node* param = m.Parameter(0);
+ Node* mult = m.Int64Add(m.Int64Mul(param, m.Int64Constant(m_param.value)),
+ m.Int64Constant(imm));
+ m.Return(mult);
+ Stream s = m.Build();
+ if (m_param.lea_expected) {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea, s[0]->arch_opcode());
+ EXPECT_EQ(AddressingModeForAddMult(m_param), s[0]->addressing_mode());
+ unsigned input_count = InputCountForLea(s[0]->addressing_mode());
+ ASSERT_EQ(input_count, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE,
+ s[0]->InputAt(input_count - 1)->kind());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
+ } else {
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kX64Imul, s[0]->arch_opcode());
+ EXPECT_EQ(kX64Add, s[1]->arch_opcode());
+ }
+ }
+}
+
+
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMultTest,
::testing::ValuesIn(kMultParams));