#ifndef V8_COMPILER_NODE_MATCHERS_H_
#define V8_COMPILER_NODE_MATCHERS_H_
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/unique.h"
// right hand sides of a binary operation and can put constants on the right
// if they appear on the left hand side of a commutative operation.
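+//
+// A minimal usage sketch (illustrative only; `add` stands for some Int32Add
+// node):
+//
+//   Int32BinopMatcher m(add);
+//   if (m.right().HasValue()) {
+//     // Constants are canonicalized onto the right, so this also covers the
+//     // case where the constant was originally the left input.
+//     int32_t k = m.right().Value();
+//   }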
template <typename Left, typename Right>
-struct BinopMatcher FINAL : public NodeMatcher {
+struct BinopMatcher : public NodeMatcher {
explicit BinopMatcher(Node* node)
: NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
bool LeftEqualsRight() const { return left().node() == right().node(); }
+ protected:
+ void SwapInputs() {
+ std::swap(left_, right_);
+ node()->ReplaceInput(0, left().node());
+ node()->ReplaceInput(1, right().node());
+ }
+
private:
void PutConstantOnRight() {
if (left().HasValue() && !right().HasValue()) {
- std::swap(left_, right_);
- node()->ReplaceInput(0, left().node());
- node()->ReplaceInput(1, right().node());
+ SwapInputs();
}
}
typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
+struct Int32AddMatcher : public Int32BinopMatcher {
+ explicit Int32AddMatcher(Node* node)
+ : Int32BinopMatcher(node), scale_exponent_(-1) {
+ PutScaledInputOnLeft();
+ }
+
+ bool HasScaledInput() const { return scale_exponent_ != -1; }
+ Node* ScaledInput() const {
+ DCHECK(HasScaledInput());
+ return left().node()->InputAt(0);
+ }
+ int ScaleExponent() const {
+ DCHECK(HasScaledInput());
+ return scale_exponent_;
+ }
+
+ private:
+ int GetInputScaleExponent(Node* node) const {
+ if (node->opcode() == IrOpcode::kWord32Shl) {
+ Int32BinopMatcher m(node);
+ if (m.right().HasValue()) {
+ int32_t value = m.right().Value();
+ if (value >= 0 && value <= 3) {
+ return value;
+ }
+ }
+ } else if (node->opcode() == IrOpcode::kInt32Mul) {
+ Int32BinopMatcher m(node);
+ if (m.right().HasValue()) {
+ int32_t value = m.right().Value();
+ if (value == 1) {
+ return 0;
+ } else if (value == 2) {
+ return 1;
+ } else if (value == 4) {
+ return 2;
+ } else if (value == 8) {
+ return 3;
+ }
+ }
+ }
+ return -1;
+ }
+
+ void PutScaledInputOnLeft() {
+ scale_exponent_ = GetInputScaleExponent(right().node());
+ if (scale_exponent_ >= 0) {
+ int left_scale_exponent = GetInputScaleExponent(left().node());
+ if (left_scale_exponent == -1) {
+ SwapInputs();
+ } else {
+ scale_exponent_ = left_scale_exponent;
+ }
+ } else {
+ scale_exponent_ = GetInputScaleExponent(left().node());
+ if (scale_exponent_ == -1) {
+ if (right().opcode() == IrOpcode::kInt32Add &&
+ left().opcode() != IrOpcode::kInt32Add) {
+ SwapInputs();
+ }
+ }
+ }
+ }
+
+ int scale_exponent_;
+};
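+
+// A minimal usage sketch for Int32AddMatcher (illustrative only), assuming
+// `add` computes x + (y << 2):
+//
+//   Int32AddMatcher m(add);
+//   if (m.HasScaledInput()) {
+//     Node* index = m.ScaledInput();     // y; the shl/mul ends up on the left
+//     int exponent = m.ScaleExponent();  // 2, i.e. a scale factor of 4
+//   }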
+
+struct ScaledWithOffsetMatcher {
+ explicit ScaledWithOffsetMatcher(Node* node)
+ : matches_(false),
+ scaled_(NULL),
+ scale_exponent_(0),
+ offset_(NULL),
+ constant_(NULL) {
+ if (node->opcode() != IrOpcode::kInt32Add) return;
+
+ // The Int32AddMatcher canonicalizes the order of constants and scale
+ // factors that are used as inputs, so instead of enumerating all possible
+ // patterns by brute force, checking for node clusters using the following
+ // templates in the following order suffices to find all of the interesting
+ // cases (S = scaled input, O = offset input, C = constant input):
+ // (S + (O + C))
+ // (S + (O + O))
+ // (S + C)
+ // (S + O)
+ // ((S + C) + O)
+ // ((S + O) + C)
+ // ((O + C) + O)
+ // ((O + O) + C)
+ // (O + C)
+ // (O + O)
+ Int32AddMatcher base_matcher(node);
+ Node* left = base_matcher.left().node();
+ Node* right = base_matcher.right().node();
+ if (base_matcher.HasScaledInput() && left->OwnedBy(node)) {
+ scaled_ = base_matcher.ScaledInput();
+ scale_exponent_ = base_matcher.ScaleExponent();
+ if (right->opcode() == IrOpcode::kInt32Add && right->OwnedBy(node)) {
+ Int32AddMatcher right_matcher(right);
+ if (right_matcher.right().HasValue()) {
+ // (S + (O + C))
+ offset_ = right_matcher.left().node();
+ constant_ = right_matcher.right().node();
+ } else {
+ // (S + (O + O))
+ offset_ = right;
+ }
+ } else if (base_matcher.right().HasValue()) {
+ // (S + C)
+ constant_ = right;
+ } else {
+ // (S + O)
+ offset_ = right;
+ }
+ } else {
+ if (left->opcode() == IrOpcode::kInt32Add && left->OwnedBy(node)) {
+ Int32AddMatcher left_matcher(left);
+ Node* left_left = left_matcher.left().node();
+ Node* left_right = left_matcher.right().node();
+ if (left_matcher.HasScaledInput() && left_left->OwnedBy(left)) {
+ scaled_ = left_matcher.ScaledInput();
+ scale_exponent_ = left_matcher.ScaleExponent();
+ if (left_matcher.right().HasValue()) {
+ // ((S + C) + O)
+ constant_ = left_right;
+ offset_ = right;
+ } else if (base_matcher.right().HasValue()) {
+ // ((S + O) + C)
+ offset_ = left_right;
+ constant_ = right;
+ } else {
+ // ((S + O) + O): the inner scale cannot be folded here, so treat the
+ // whole left add as a plain unscaled operand.
+ scaled_ = left;
+ scale_exponent_ = 0;
+ offset_ = right;
+ }
+ } else {
+ if (left_matcher.right().HasValue()) {
+ // ((O + C) + O)
+ scaled_ = left_left;
+ constant_ = left_right;
+ offset_ = right;
+ } else if (base_matcher.right().HasValue()) {
+ // ((O + O) + C)
+ scaled_ = left_left;
+ offset_ = left_right;
+ constant_ = right;
+ } else {
+ // (O + O)
+ scaled_ = left;
+ offset_ = right;
+ }
+ }
+ } else {
+ if (base_matcher.right().HasValue()) {
+ // (O + C)
+ offset_ = left;
+ constant_ = right;
+ } else {
+ // (O + O)
+ offset_ = left;
+ scaled_ = right;
+ }
+ }
+ }
+ matches_ = true;
+ }
+
+ bool matches() const { return matches_; }
+ Node* scaled() const { return scaled_; }
+ int scale_exponent() const { return scale_exponent_; }
+ Node* offset() const { return offset_; }
+ Node* constant() const { return constant_; }
+
+ private:
+ bool matches_;
+
+ protected:
+ Node* scaled_;
+ int scale_exponent_;
+ Node* offset_;
+ Node* constant_;
+};
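+
+// A minimal usage sketch for ScaledWithOffsetMatcher (illustrative only),
+// assuming `add` computes ((y << 3) + x) + 4 and the intermediate nodes are
+// used only by `add`:
+//
+//   ScaledWithOffsetMatcher m(add);
+//   if (m.matches()) {
+//     Node* index = m.scaled();           // y
+//     int exponent = m.scale_exponent();  // 3, i.e. a scale factor of 8
+//     Node* base = m.offset();            // x
+//     Node* displacement = m.constant();  // the constant 4, or NULL if absent
+//   }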
+
} // namespace compiler
} // namespace internal
} // namespace v8
VisitWord64Shift(this, node, kX64Ror);
}
+namespace {
+
+AddressingMode GenerateMemoryOperandInputs(X64OperandGenerator* g, Node* scaled,
+ int scale_exponent, Node* offset,
+ Node* constant,
+ InstructionOperand* inputs[],
+ size_t* input_count) {
+ AddressingMode mode = kMode_MRI;
+ if (offset != NULL) {
+ inputs[(*input_count)++] = g->UseRegister(offset);
+ if (scaled != NULL) {
+ DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
+ inputs[(*input_count)++] = g->UseRegister(scaled);
+ if (constant != NULL) {
+ inputs[(*input_count)++] = g->UseImmediate(constant);
+ static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
+ kMode_MR4I, kMode_MR8I};
+ mode = kMRnI_modes[scale_exponent];
+ } else {
+ static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
+ kMode_MR4, kMode_MR8};
+ mode = kMRn_modes[scale_exponent];
+ }
+ } else {
+ DCHECK(constant != NULL);
+ inputs[(*input_count)++] = g->UseImmediate(constant);
+ mode = kMode_MRI;
+ }
+ } else {
+ DCHECK(scaled != NULL);
+ DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
+ inputs[(*input_count)++] = g->UseRegister(scaled);
+ if (constant != NULL) {
+ inputs[(*input_count)++] = g->UseImmediate(constant);
+ static const AddressingMode kMnI_modes[] = {kMode_M1I, kMode_M2I,
+ kMode_M4I, kMode_M8I};
+ mode = kMnI_modes[scale_exponent];
+ } else {
+ static const AddressingMode kMn_modes[] = {kMode_M1, kMode_M2, kMode_M4,
+ kMode_M8};
+ mode = kMn_modes[scale_exponent];
+ }
+ }
+ return mode;
+}
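+
+// A minimal usage sketch (illustrative; it mirrors the call in VisitInt32Add
+// below), assuming the operands come from a ScaledWithOffsetMatcher match and
+// that `constant` may be NULL:
+//
+//   InstructionOperand* inputs[4];
+//   size_t input_count = 0;
+//   AddressingMode mode = GenerateMemoryOperandInputs(
+//       &g, scaled, scale_exponent, offset, constant, inputs, &input_count);
+//   // offset + scaled*2 + constant, for example, selects kMode_MR2I and
+//   // emits three inputs.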
+
+} // namespace
+
void InstructionSelector::VisitInt32Add(Node* node) {
+ // Try to match the Int32Add to a leal pattern.
+ ScaledWithOffsetMatcher m(node);
+ X64OperandGenerator g(this);
+ if (m.matches() && (m.constant() == NULL || g.CanBeImmediate(m.constant()))) {
+ InstructionOperand* inputs[4];
+ size_t input_count = 0;
+
+ AddressingMode mode = GenerateMemoryOperandInputs(
+ &g, m.scaled(), m.scale_exponent(), m.offset(), m.constant(), inputs,
+ &input_count);
+
+ DCHECK_NE(0, static_cast<int>(input_count));
+ DCHECK_GE(arraysize(inputs), input_count);
+
+ InstructionOperand* outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+
+ InstructionCode opcode = AddressingModeField::encode(mode) | kX64Lea32;
+
+ Emit(opcode, 1, outputs, input_count, inputs);
+ return;
+ }
+
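+ // No lea pattern matched, or the displacement constant cannot be encoded as
+ // an immediate; fall back to a plain 32-bit add.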
VisitBinop(this, node, kX64Add32);
}
// -----------------------------------------------------------------------------
+
function TestDivisionLike(ref, construct, values, divisor) {
// Define the function to test.
var OptFun = new Function("dividend", construct(divisor));
%OptimizeFunctionOnNextCall(OptFun);
OptFun(13);
- // Check results.
- values.forEach(function(dividend) {
+ function checkDividend(dividend) {
// Avoid deopt caused by overflow, we do not want to test this here.
if (dividend === -2147483648 && divisor === -1) return;
assertEquals(ref(dividend, divisor), OptFun(dividend));
- });
+ }
+
+ // Check results.
+ values.forEach(checkDividend);
}
function Test(ref, construct) {
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/opcodes.h"
+
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class NodeMatcherTest : public GraphTest {
+ public:
+ NodeMatcherTest() {}
+ virtual ~NodeMatcherTest() {}
+
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+ MachineOperatorBuilder machine_;
+};
+
+namespace {
+
+void CheckScaledWithOffsetMatch(ScaledWithOffsetMatcher* matcher, Node* scaled,
+ int scale_exponent, Node* offset,
+ Node* constant) {
+ EXPECT_TRUE(matcher->matches());
+ EXPECT_EQ(scaled, matcher->scaled());
+ EXPECT_EQ(scale_exponent, matcher->scale_exponent());
+ EXPECT_EQ(offset, matcher->offset());
+ EXPECT_EQ(constant, matcher->constant());
+}
+}  // namespace
+
+
+TEST_F(NodeMatcherTest, ScaledWithOffsetMatcher) {
+ graph()->SetStart(graph()->NewNode(common()->Start(0)));
+
+ const Operator* c0_op = common()->Int32Constant(0);
+ Node* c0 = graph()->NewNode(c0_op);
+ USE(c0);
+ const Operator* c1_op = common()->Int32Constant(1);
+ Node* c1 = graph()->NewNode(c1_op);
+ USE(c1);
+ const Operator* c2_op = common()->Int32Constant(2);
+ Node* c2 = graph()->NewNode(c2_op);
+ USE(c2);
+ const Operator* c3_op = common()->Int32Constant(3);
+ Node* c3 = graph()->NewNode(c3_op);
+ USE(c3);
+ const Operator* c4_op = common()->Int32Constant(4);
+ Node* c4 = graph()->NewNode(c4_op);
+ USE(c4);
+ const Operator* c8_op = common()->Int32Constant(8);
+ Node* c8 = graph()->NewNode(c8_op);
+ USE(c8);
+
+ const Operator* o0_op = common()->Parameter(0);
+ Node* o0 = graph()->NewNode(o0_op, graph()->start());
+ USE(o0);
+ const Operator* o1_op = common()->Parameter(1);
+ Node* o1 = graph()->NewNode(o1_op, graph()->start());
+ USE(o1);
+
+ const Operator* p1_op = common()->Parameter(3);
+ Node* p1 = graph()->NewNode(p1_op, graph()->start());
+ USE(p1);
+
+ const Operator* a_op = machine()->Int32Add();
+ USE(a_op);
+
+ const Operator* m_op = machine()->Int32Mul();
+ Node* m1 = graph()->NewNode(m_op, p1, c1);
+ Node* m2 = graph()->NewNode(m_op, p1, c2);
+ Node* m4 = graph()->NewNode(m_op, p1, c4);
+ Node* m8 = graph()->NewNode(m_op, p1, c8);
+ Node* m3 = graph()->NewNode(m_op, p1, c3);
+
+ const Operator* s_op = machine()->Word32Shl();
+ Node* s0 = graph()->NewNode(s_op, p1, c0);
+ Node* s1 = graph()->NewNode(s_op, p1, c1);
+ Node* s2 = graph()->NewNode(s_op, p1, c2);
+ Node* s3 = graph()->NewNode(s_op, p1, c3);
+ Node* s4 = graph()->NewNode(s_op, p1, c4);
+
+ // 1 INPUT
+
+ // The only relevant test case is checking for a non-match.
+ ScaledWithOffsetMatcher match0(c0);
+ EXPECT_FALSE(match0.matches());
+
+ // 2 INPUT
+
+ // (O0 + O1) -> [O1, 0, O0, NULL]
+ ScaledWithOffsetMatcher match1(graph()->NewNode(a_op, o0, o1));
+ CheckScaledWithOffsetMatch(&match1, o1, 0, o0, NULL);
+
+ // (O0 + C0) -> [NULL, 0, O0, C0]
+ ScaledWithOffsetMatcher match2(graph()->NewNode(a_op, o0, c0));
+ CheckScaledWithOffsetMatch(&match2, NULL, 0, o0, c0);
+
+ // (C0 + O0) -> [NULL, 0, O0, C0]
+ ScaledWithOffsetMatcher match3(graph()->NewNode(a_op, c0, o0));
+ CheckScaledWithOffsetMatch(&match3, NULL, 0, o0, c0);
+
+ // (O0 + M1) -> [p1, 0, O0, NULL]
+ ScaledWithOffsetMatcher match4(graph()->NewNode(a_op, o0, m1));
+ CheckScaledWithOffsetMatch(&match4, p1, 0, o0, NULL);
+
+ // (M1 + O0) -> [p1, 0, O0, NULL]
+ m1 = graph()->NewNode(m_op, p1, c1);
+ ScaledWithOffsetMatcher match5(graph()->NewNode(a_op, m1, o0));
+ CheckScaledWithOffsetMatch(&match5, p1, 0, o0, NULL);
+
+ // (C0 + M1) -> [P1, 0, NULL, C0]
+ m1 = graph()->NewNode(m_op, p1, c1);
+ ScaledWithOffsetMatcher match6(graph()->NewNode(a_op, c0, m1));
+ CheckScaledWithOffsetMatch(&match6, p1, 0, NULL, c0);
+
+ // (M1 + C0) -> [P1, 0, NULL, C0]
+ m1 = graph()->NewNode(m_op, p1, c1);
+ ScaledWithOffsetMatcher match7(graph()->NewNode(a_op, m1, c0));
+ CheckScaledWithOffsetMatch(&match7, p1, 0, NULL, c0);
+
+ // (O0 + S0) -> [p1, 0, O0, NULL]
+ ScaledWithOffsetMatcher match8(graph()->NewNode(a_op, o0, s0));
+ CheckScaledWithOffsetMatch(&match8, p1, 0, o0, NULL);
+
+ // (S0 + O0) -> [p1, 0, O0, NULL]
+ s0 = graph()->NewNode(s_op, p1, c0);
+ ScaledWithOffsetMatcher match9(graph()->NewNode(a_op, s0, o0));
+ CheckScaledWithOffsetMatch(&match9, p1, 0, o0, NULL);
+
+ // (C0 + S0) -> [P1, 0, NULL, C0]
+ s0 = graph()->NewNode(s_op, p1, c0);
+ ScaledWithOffsetMatcher match10(graph()->NewNode(a_op, c0, s0));
+ CheckScaledWithOffsetMatch(&match10, p1, 0, NULL, c0);
+
+ // (S0 + C0) -> [P1, 0, NULL, C0]
+ s0 = graph()->NewNode(s_op, p1, c0);
+ ScaledWithOffsetMatcher match11(graph()->NewNode(a_op, s0, c0));
+ CheckScaledWithOffsetMatch(&match11, p1, 0, NULL, c0);
+
+ // (O0 + M2) -> [p1, 1, O0, NULL]
+ ScaledWithOffsetMatcher match12(graph()->NewNode(a_op, o0, m2));
+ CheckScaledWithOffsetMatch(&match12, p1, 1, o0, NULL);
+
+ // (M2 + O0) -> [p1, 1, O0, NULL]
+ m2 = graph()->NewNode(m_op, p1, c2);
+ ScaledWithOffsetMatcher match13(graph()->NewNode(a_op, m2, o0));
+ CheckScaledWithOffsetMatch(&match13, p1, 1, o0, NULL);
+
+ // (C0 + M2) -> [P1, 1, NULL, C0]
+ m2 = graph()->NewNode(m_op, p1, c2);
+ ScaledWithOffsetMatcher match14(graph()->NewNode(a_op, c0, m2));
+ CheckScaledWithOffsetMatch(&match14, p1, 1, NULL, c0);
+
+ // (M2 + C0) -> [P1, 1, NULL, C0]
+ m2 = graph()->NewNode(m_op, p1, c2);
+ ScaledWithOffsetMatcher match15(graph()->NewNode(a_op, m2, c0));
+ CheckScaledWithOffsetMatch(&match15, p1, 1, NULL, c0);
+
+ // (O0 + S1) -> [p1, 1, O0, NULL]
+ ScaledWithOffsetMatcher match16(graph()->NewNode(a_op, o0, s1));
+ CheckScaledWithOffsetMatch(&match16, p1, 1, o0, NULL);
+
+ // (S1 + O0) -> [p1, 1, O0, NULL]
+ s1 = graph()->NewNode(s_op, p1, c1);
+ ScaledWithOffsetMatcher match17(graph()->NewNode(a_op, s1, o0));
+ CheckScaledWithOffsetMatch(&match17, p1, 1, o0, NULL);
+
+ // (C0 + S1) -> [P1, 1, NULL, C0]
+ s1 = graph()->NewNode(s_op, p1, c1);
+ ScaledWithOffsetMatcher match18(graph()->NewNode(a_op, c0, s1));
+ CheckScaledWithOffsetMatch(&match18, p1, 1, NULL, c0);
+
+ // (S1 + C0) -> [P1, 1, NULL, C0]
+ s1 = graph()->NewNode(s_op, p1, c1);
+ ScaledWithOffsetMatcher match19(graph()->NewNode(a_op, s1, c0));
+ CheckScaledWithOffsetMatch(&match19, p1, 1, NULL, c0);
+
+ // (O0 + M4) -> [p1, 2, O0, NULL]
+ ScaledWithOffsetMatcher match20(graph()->NewNode(a_op, o0, m4));
+ CheckScaledWithOffsetMatch(&match20, p1, 2, o0, NULL);
+
+ // (M4 + O0) -> [p1, 2, O0, NULL]
+ m4 = graph()->NewNode(m_op, p1, c4);
+ ScaledWithOffsetMatcher match21(graph()->NewNode(a_op, m4, o0));
+ CheckScaledWithOffsetMatch(&match21, p1, 2, o0, NULL);
+
+ // (C0 + M4) -> [p1, 2, NULL, C0]
+ m4 = graph()->NewNode(m_op, p1, c4);
+ ScaledWithOffsetMatcher match22(graph()->NewNode(a_op, c0, m4));
+ CheckScaledWithOffsetMatch(&match22, p1, 2, NULL, c0);
+
+ // (M4 + C0) -> [p1, 2, NULL, C0]
+ m4 = graph()->NewNode(m_op, p1, c4);
+ ScaledWithOffsetMatcher match23(graph()->NewNode(a_op, m4, c0));
+ CheckScaledWithOffsetMatch(&match23, p1, 2, NULL, c0);
+
+ // (O0 + S2) -> [p1, 2, O0, NULL]
+ ScaledWithOffsetMatcher match24(graph()->NewNode(a_op, o0, s2));
+ CheckScaledWithOffsetMatch(&match24, p1, 2, o0, NULL);
+
+ // (S2 + O0) -> [p1, 2, O0, NULL]
+ s2 = graph()->NewNode(s_op, p1, c2);
+ ScaledWithOffsetMatcher match25(graph()->NewNode(a_op, s2, o0));
+ CheckScaledWithOffsetMatch(&match25, p1, 2, o0, NULL);
+
+ // (C0 + S2) -> [p1, 2, NULL, C0]
+ s2 = graph()->NewNode(s_op, p1, c2);
+ ScaledWithOffsetMatcher match26(graph()->NewNode(a_op, c0, s2));
+ CheckScaledWithOffsetMatch(&match26, p1, 2, NULL, c0);
+
+ // (S2 + C0) -> [p1, 2, NULL, C0]
+ s2 = graph()->NewNode(s_op, p1, c2);
+ ScaledWithOffsetMatcher match27(graph()->NewNode(a_op, s2, c0));
+ CheckScaledWithOffsetMatch(&match27, p1, 2, NULL, c0);
+
+ // (O0 + M8) -> [p1, 3, O0, NULL]
+ ScaledWithOffsetMatcher match28(graph()->NewNode(a_op, o0, m8));
+ CheckScaledWithOffsetMatch(&match28, p1, 3, o0, NULL);
+
+ // (M8 + O0) -> [p1, 3, O0, NULL]
+ m8 = graph()->NewNode(m_op, p1, c8);
+ ScaledWithOffsetMatcher match29(graph()->NewNode(a_op, m8, o0));
+ CheckScaledWithOffsetMatch(&match29, p1, 3, o0, NULL);
+
+ // (C0 + M8) -> [p1, 3, NULL, C0]
+ m8 = graph()->NewNode(m_op, p1, c8);
+ ScaledWithOffsetMatcher match30(graph()->NewNode(a_op, c0, m8));
+ CheckScaledWithOffsetMatch(&match30, p1, 3, NULL, c0);
+
+ // (M8 + C0) -> [p1, 3, NULL, C0]
+ m8 = graph()->NewNode(m_op, p1, c8);
+ ScaledWithOffsetMatcher match31(graph()->NewNode(a_op, m8, c0));
+ CheckScaledWithOffsetMatch(&match31, p1, 3, NULL, c0);
+
+ // (O0 + S3) -> [p1, 3, O0, NULL]
+ ScaledWithOffsetMatcher match32(graph()->NewNode(a_op, o0, s3));
+ CheckScaledWithOffsetMatch(&match32, p1, 3, o0, NULL);
+
+ // (S3 + O0) -> [p1, 3, O0, NULL]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match33(graph()->NewNode(a_op, s3, o0));
+ CheckScaledWithOffsetMatch(&match33, p1, 3, o0, NULL);
+
+ // (C0 + S3) -> [p1, 3, NULL, C0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match34(graph()->NewNode(a_op, c0, s3));
+ CheckScaledWithOffsetMatch(&match34, p1, 3, NULL, c0);
+
+ // (S3 + C0) -> [p1, 3, NULL, C0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match35(graph()->NewNode(a_op, s3, c0));
+ CheckScaledWithOffsetMatch(&match35, p1, 3, NULL, c0);
+
+ // 2 INPUT - NEGATIVE CASES
+
+ // (O1 + M3) -> [M3, 0, O1, NULL]
+ ScaledWithOffsetMatcher match36(graph()->NewNode(a_op, o1, m3));
+ CheckScaledWithOffsetMatch(&match36, m3, 0, o1, NULL);
+
+ // (O1 + S4) -> [S4, 0, O1, NULL]
+ ScaledWithOffsetMatcher match37(graph()->NewNode(a_op, o1, s4));
+ CheckScaledWithOffsetMatch(&match37, s4, 0, o1, NULL);
+
+ // 3 INPUT
+
+ // (C0 + S3) + O0 -> [p1, 3, o0, c0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match38(
+ graph()->NewNode(a_op, graph()->NewNode(a_op, c0, s3), o0));
+ CheckScaledWithOffsetMatch(&match38, p1, 3, o0, c0);
+
+ // (O0 + C0) + S3 -> [p1, 3, o0, c0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match39(
+ graph()->NewNode(a_op, graph()->NewNode(a_op, o0, c0), s3));
+ CheckScaledWithOffsetMatch(&match39, p1, 3, o0, c0);
+
+ // (S3 + O0) + C0 -> [p1, 3, o0, c0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match40(
+ graph()->NewNode(a_op, graph()->NewNode(a_op, s3, o0), c0));
+ CheckScaledWithOffsetMatch(&match40, p1, 3, o0, c0);
+
+ // C0 + (S3 + O0) -> [p1, 3, o0, c0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match41(
+ graph()->NewNode(a_op, c0, graph()->NewNode(a_op, s3, o0)));
+ CheckScaledWithOffsetMatch(&match41, p1, 3, o0, c0);
+
+ // O0 + (C0 + S3) -> [p1, 3, o0, c0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match42(
+ graph()->NewNode(a_op, o0, graph()->NewNode(a_op, c0, s3)));
+ CheckScaledWithOffsetMatch(&match42, p1, 3, o0, c0);
+
+ // S3 + (O0 + C0) -> [p1, 3, o0, c0]
+ s3 = graph()->NewNode(s_op, p1, c3);
+ ScaledWithOffsetMatcher match43(
+ graph()->NewNode(a_op, s3, graph()->NewNode(a_op, o0, c0)));
+ CheckScaledWithOffsetMatch(&match43, p1, 3, o0, c0);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
m.Return(m.Int32Add(a0, p0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
- EXPECT_EQ(kX64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddConstantAsLea) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(p0, c0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLea) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(c0, p0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2Mul) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+ m.Return(m.Int32Add(p0, s0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Mul) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+ m.Return(m.Int32Add(s0, p0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2Shl) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
+ m.Return(m.Int32Add(p0, s0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Shl) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
+ m.Return(m.Int32Add(s0, p0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled4Mul) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
+ m.Return(m.Int32Add(p0, s0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR4, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled4Shl) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
+ m.Return(m.Int32Add(p0, s0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR4, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled8Mul) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
+ m.Return(m.Int32Add(p0, s0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR8, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled8Shl) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
+ m.Return(m.Int32Add(p0, s0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR8, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstant) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle1) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(p0, m.Int32Add(s0, c0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle2) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(s0, m.Int32Add(c0, p0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle3) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(m.Int32Add(s0, c0), p0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle4) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(m.Int32Add(c0, p0), s0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle5) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(m.Int32Add(p0, s0), c0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2ShlWithConstant) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled4MulWithConstant) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR4I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled4ShlWithConstant) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR4I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled8MulWithConstant) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR8I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled8ShlWithConstant) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
+ Node* const c0 = m.Int32Constant(15);
+ m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR8I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
}
'compiler/js-operator-unittest.cc',
'compiler/js-typed-lowering-unittest.cc',
'compiler/machine-operator-reducer-unittest.cc',
'compiler/machine-operator-unittest.cc',
+ 'compiler/node-matchers-unittest.cc',
'compiler/node-test-utils.cc',
'compiler/node-test-utils.h',
'compiler/register-allocator-unittest.cc',