ScaledWithOffset32Matcher m(node);
X64OperandGenerator g(this);
if (m.matches() && (m.constant() == NULL || g.CanBeImmediate(m.constant()))) {
+ // The add can be represented as a "leal", but there may be a smaller
+ // representation that is better and no more expensive.
+ if (m.offset() != NULL) {
+ if (m.scaled() == NULL) {
+ if (!IsLive(m.offset())) {
+ // If the add is of the form (r1 + immediate) and the non-constant
+ // input to the add is owned by the add, then it doesn't need to be
+ // preserved across the operation, so use more compact,
+ // source-register-overwriting versions when they are available and
+ // smaller, e.g. "incl" and "decl".
+ int32_t value =
+ m.constant() == NULL ? 0 : OpParameter<int32_t>(m.constant());
+ if (value == 1) {
+ Emit(kX64Inc32, g.DefineSameAsFirst(node),
+ g.UseRegister(m.offset()));
+ return;
+ } else if (value == -1) {
+ Emit(kX64Dec32, g.DefineSameAsFirst(node),
+ g.UseRegister(m.offset()));
+ return;
+ }
+ }
+ }
+ }
+
InstructionOperand* inputs[4];
size_t input_count = 0;
-
AddressingMode mode = GenerateMemoryOperandInputs(
&g, m.scaled(), m.scale_exponent(), m.offset(), m.constant(), inputs,
&input_count);
if (m.left().Is(0)) {
Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
} else {
+ if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
+ // If the non-constant input is owned by the subtract (not live afterwards),
+ // using a "decl" or "incl" that overwrites that input is smaller and
+ // probably an overall win.
+ if (!IsLive(m.left().node())) {
+ if (m.right().Value() == 1) {
+ Emit(kX64Dec32, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()));
+ return;
+ }
+ if (m.right().Value() == -1) {
+ Emit(kX64Inc32, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()));
+ return;
+ }
+ } else {
+ // Special handling for subtraction of constants where the non-constant
+ // input is used elsewhere. To eliminate the gap move before the sub to
+ // copy the destination register, use a "leal" instead.
+ Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(-m.right().Value()));
+ return;
+ }
+ }
VisitBinop(this, node, kX64Sub32);
}
}
// Addition.
-TEST_F(InstructionSelectorTest, Int32AddWithInt32AddWithParameters) {
+TEST_F(InstructionSelectorTest, Int32AddWithInt32ParametersLea) {
StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const a0 = m.Int32Add(p0, p1);
- m.Return(m.Int32Add(a0, p0));
+ USE(a0);
+ // Additional uses of the add's inputs cause lea to be chosen over add.
+ Node* const a1 = m.Int32Add(p0, p1);
+ m.Return(m.Int32Add(a0, a1));
Stream s = m.Build();
- ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(3U, s.size());
EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
}
-TEST_F(InstructionSelectorTest, Int32AddConstantAsLea) {
+TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaSingle) {
StreamBuilder m(this, kMachInt32, kMachInt32);
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(15);
+ // If there is only a single use of the add's input, still use lea rather
+ // than add; it is faster.
m.Return(m.Int32Add(p0, c0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
}
-TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLea) {
+TEST_F(InstructionSelectorTest, Int32AddConstantAsInc) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const c0 = m.Int32Constant(1);
+ // If there is only a single use of the add's input and the immediate
+ // constant for the add is 1, use inc (which overwrites that input).
+ m.Return(m.Int32Add(p0, c0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Inc32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddConstantAsDec) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const c0 = m.Int32Constant(-1);
+ // If there is only a single use of the add's input and the immediate
+ // constant for the add is -1, use dec.
+ m.Return(m.Int32Add(p0, c0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Dec32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaDouble) {
StreamBuilder m(this, kMachInt32, kMachInt32);
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(15);
+ // A second use of the add's input forces lea to be chosen.
+ Node* const a0 = m.Int32Add(p0, c0);
+ USE(a0);
+ m.Return(m.Int32Add(p0, c0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaSingle) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const c0 = m.Int32Constant(15);
+ // If there is only a single use of the add's input, still use lea; it is
+ // generally faster than add and reduces register pressure.
+ m.Return(m.Int32Add(c0, p0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaDouble) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const c0 = m.Int32Constant(15);
+ // A second use of the add's input forces lea to be chosen.
+ Node* const a0 = m.Int32Add(c0, p0);
+ USE(a0);
m.Return(m.Int32Add(c0, p0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
}
+TEST_F(InstructionSelectorTest, Int32SubConstantAsInc) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const c0 = m.Int32Constant(-1);
+ // If there is only a single use of the sub's input and the immediate
+ // constant for the sub is -1, use inc (subtracting -1 adds 1).
+ m.Return(m.Int32Sub(p0, c0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Inc32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubConstantAsDec) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const c0 = m.Int32Constant(1);
+ // If there is only a single use of the sub's input and the immediate
+ // constant for the sub is 1, use dec.
+ m.Return(m.Int32Sub(p0, c0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Dec32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
// -----------------------------------------------------------------------------
// Multiplication.