case kArm64Ror32:
ASSEMBLE_SHIFT(Ror, 32);
break;
+ case kArm64Mov32:
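+ // On ARM64, writing a W register clears the upper 32 bits of the
+ // corresponding X register, so a 32-bit Mov also zero-extends to 64 bits.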
+ __ Mov(i.OutputRegister32(), i.InputRegister32(0));
+ break;
+ case kArm64Sxtw:
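+ // Sxtw sign-extends the low 32 bits of the input into the 64-bit output.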
+ __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
+ break;
case kArm64CallCodeObject: {
if (instr->InputAt(0)->IsImmediate()) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
0, 2);
break;
}
- case kArm64Int32ToInt64:
- __ Sxtw(i.OutputRegister(), i.InputRegister(0));
- break;
- case kArm64Int64ToInt32:
- if (!i.OutputRegister().is(i.InputRegister(0))) {
- __ Mov(i.OutputRegister(), i.InputRegister(0));
- }
- break;
case kArm64Float64ToInt32:
__ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
break;
V(Arm64Sar32) \
V(Arm64Ror) \
V(Arm64Ror32) \
+ V(Arm64Mov32) \
+ V(Arm64Sxtw) \
V(Arm64CallCodeObject) \
V(Arm64CallJSFunction) \
V(Arm64CallAddress) \
V(Arm64Float64Mul) \
V(Arm64Float64Div) \
V(Arm64Float64Mod) \
- V(Arm64Int32ToInt64) \
- V(Arm64Int64ToInt32) \
V(Arm64Float64ToInt32) \
V(Arm64Float64ToUint32) \
V(Arm64Int32ToFloat64) \
};
-static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
- Arm64OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-
static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
Arm64OperandGenerator g(selector);
}
-void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
- VisitRR(this, kArm64Int32ToInt64, node);
-}
-
-
-void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
- VisitRR(this, kArm64Int64ToInt32, node);
-}
-
-
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Int32ToFloat64, g.DefineAsDoubleRegister(node),
}
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Sxtw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ Arm64OperandGenerator g(this);
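+ // A plain 32-bit move suffices here: writing the W register zero-extends.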
+ Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ Arm64OperandGenerator g(this);
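+ // The 32-bit move discards the upper 32 bits, which is exactly the
+ // truncation required here.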
+ Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat64Add(Node* node) {
VisitRRRFloat64(this, kArm64Float64Add, node);
}
Reduction ChangeLowering::ChangeInt32ToTagged(Node* val, Node* control) {
if (machine()->is64()) {
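+ // On 64-bit targets the Smi shift is a 64-bit operation, so the 32-bit
+ // value must be explicitly sign-extended before it is shifted.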
return Replace(
- graph()->NewNode(machine()->WordShl(), val, SmiShiftBitsConstant()));
+ graph()->NewNode(machine()->Word64Shl(),
+ graph()->NewNode(machine()->ChangeInt32ToInt64(), val),
+ SmiShiftBitsConstant()));
}
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), val, val);
graph()->NewNode(machine()->WordSar(), val, SmiShiftBitsConstant());
Node* number =
machine()->is64()
- ? graph()->NewNode(machine()->ConvertInt64ToInt32(), integer)
+ ? graph()->NewNode(machine()->TruncateInt64ToInt32(), integer)
: integer;
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
Node* number = graph()->NewNode(
machine()->ChangeInt32ToFloat64(),
machine()->is64()
- ? graph()->NewNode(machine()->ConvertInt64ToInt32(), integer)
+ ? graph()->NewNode(machine()->TruncateInt64ToInt32(), integer)
: integer);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
return VisitInt64LessThan(node);
case IrOpcode::kInt64LessThanOrEqual:
return VisitInt64LessThanOrEqual(node);
- case IrOpcode::kConvertInt32ToInt64:
- return VisitConvertInt32ToInt64(node);
- case IrOpcode::kConvertInt64ToInt32:
- return VisitConvertInt64ToInt32(node);
case IrOpcode::kChangeInt32ToFloat64:
return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
case IrOpcode::kChangeUint32ToFloat64:
return VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
return VisitChangeFloat64ToUint32(node);
+ case IrOpcode::kChangeInt32ToInt64:
+ return VisitChangeInt32ToInt64(node);
+ case IrOpcode::kChangeUint32ToUint64:
+ return VisitChangeUint32ToUint64(node);
+ case IrOpcode::kTruncateInt64ToInt32:
+ return VisitTruncateInt64ToInt32(node);
case IrOpcode::kFloat64Add:
return MarkAsDouble(node), VisitFloat64Add(node);
case IrOpcode::kFloat64Sub:
void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
return Int64LessThanOrEqual(b, a);
}
+ // TODO(turbofan): What is this used for?
Node* ConvertIntPtrToInt32(Node* a) {
- return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a)
+ return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->TruncateInt64ToInt32(), a)
: a;
}
Node* ConvertInt32ToIntPtr(Node* a) {
- return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ConvertInt32ToInt64(), a)
+ return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ChangeInt32ToInt64(), a)
: a;
}
}
// Conversions.
- Node* ConvertInt32ToInt64(Node* a) {
- return NEW_NODE_1(MACHINE()->ConvertInt32ToInt64(), a);
- }
- Node* ConvertInt64ToInt32(Node* a) {
- return NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a);
- }
Node* ChangeInt32ToFloat64(Node* a) {
return NEW_NODE_1(MACHINE()->ChangeInt32ToFloat64(), a);
}
Node* ChangeFloat64ToUint32(Node* a) {
return NEW_NODE_1(MACHINE()->ChangeFloat64ToUint32(), a);
}
+ Node* ChangeInt32ToInt64(Node* a) {
+ return NEW_NODE_1(MACHINE()->ChangeInt32ToInt64(), a);
+ }
+ Node* ChangeUint32ToUint64(Node* a) {
+ return NEW_NODE_1(MACHINE()->ChangeUint32ToUint64(), a);
+ }
+ Node* TruncateInt64ToInt32(Node* a) {
+ return NEW_NODE_1(MACHINE()->TruncateInt64ToInt32(), a);
+ }
#ifdef MACHINE_ASSEMBLER_SUPPORTS_CALL_C
// Call to C.
Operator* Int64LessThan() { BINOP(Int64LessThan); }
Operator* Int64LessThanOrEqual() { BINOP(Int64LessThanOrEqual); }
- Operator* ConvertInt32ToInt64() { UNOP(ConvertInt32ToInt64); }
- Operator* ConvertInt64ToInt32() { UNOP(ConvertInt64ToInt32); }
-
// Convert representation of integers between float64 and int32/uint32.
// The precise rounding mode and handling of out of range inputs are *not*
// defined for these operators, since they are intended only for use with
// integers.
- // TODO(titzer): rename ConvertXXX to ChangeXXX in machine operators.
Operator* ChangeInt32ToFloat64() { UNOP(ChangeInt32ToFloat64); }
Operator* ChangeUint32ToFloat64() { UNOP(ChangeUint32ToFloat64); }
Operator* ChangeFloat64ToInt32() { UNOP(ChangeFloat64ToInt32); }
Operator* ChangeFloat64ToUint32() { UNOP(ChangeFloat64ToUint32); }
+ // Sign/zero extend int32/uint32 to int64/uint64.
+ Operator* ChangeInt32ToInt64() { UNOP(ChangeInt32ToInt64); }
+ Operator* ChangeUint32ToUint64() { UNOP(ChangeUint32ToUint64); }
+
+ // Discard the high-order bits and convert the remaining low 32 bits to int32.
+ Operator* TruncateInt64ToInt32() { UNOP(TruncateInt64ToInt32); }
+
// Floating point operators always operate with IEEE 754 round-to-nearest.
Operator* Float64Add() { BINOP_C(Float64Add); }
Operator* Float64Sub() { BINOP(Float64Sub); }
V(Int64UMod) \
V(Int64LessThan) \
V(Int64LessThanOrEqual) \
- V(ConvertInt64ToInt32) \
- V(ConvertInt32ToInt64) \
V(ChangeInt32ToFloat64) \
V(ChangeUint32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeUint32ToUint64) \
+ V(TruncateInt64ToInt32) \
V(Float64Add) \
V(Float64Sub) \
V(Float64Mul) \
#ifndef V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
#define V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
-#include "src/v8.h"
-
+#include "src/compiler/common-operator.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator-properties.h"
case IrOpcode::kWord64Equal:
return VisitBinop(node, kRepWord64, kRepBit);
- case IrOpcode::kConvertInt32ToInt64:
+ case IrOpcode::kChangeInt32ToInt64:
return VisitUnop(node, kTypeInt32 | kRepWord32,
- kTypeInt32 | kRepWord64);
- case IrOpcode::kConvertInt64ToInt32:
+ kTypeInt64 | kRepWord64);
+ case IrOpcode::kChangeUint32ToUint64:
+ return VisitUnop(node, kTypeUint32 | kRepWord32,
+ kTypeUint64 | kRepWord64);
+ case IrOpcode::kTruncateInt64ToInt32:
return VisitUnop(node, kTypeInt64 | kRepWord64,
kTypeInt32 | kRepWord32);
case kX64PushI:
__ pushq(i.InputImmediate(0));
break;
+ case kX64Movl: {
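+ // On x64, 32-bit moves implicitly zero-extend into the full 64-bit
+ // register, so movl covers both zero-extension and 64->32 truncation.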
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kRegister) {
+ __ movl(i.OutputRegister(), input.reg);
+ } else {
+ __ movl(i.OutputRegister(), input.operand);
+ }
+ break;
+ }
+ case kX64Movsxlq: {
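+ // movsxlq sign-extends the 32-bit source into the 64-bit destination.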
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kRegister) {
+ __ movsxlq(i.OutputRegister(), input.reg);
+ } else {
+ __ movsxlq(i.OutputRegister(), input.operand);
+ }
+ break;
+ }
case kX64CallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ addq(rsp, Immediate(kDoubleSize));
break;
}
- case kX64Int32ToInt64:
- __ movzxwq(i.OutputRegister(), i.InputRegister(0));
- break;
- case kX64Int64ToInt32:
- __ Move(i.OutputRegister(), i.InputRegister(0));
- break;
case kSSEFloat64ToInt32: {
RegisterOrOperand input = i.InputRegisterOrOperand(0);
if (input.type == kDoubleRegister) {
V(X64Ror32) \
V(X64Push) \
V(X64PushI) \
+ V(X64Movsxlq) \
+ V(X64Movl) \
V(X64CallCodeObject) \
V(X64CallAddress) \
V(PopStack) \
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
- V(X64Int32ToInt64) \
- V(X64Int64ToInt32) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
}
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ X64OperandGenerator g(this);
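+ // As with zero-extension, a 32-bit move performs the truncation implicitly.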
+ Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat64Add(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
}
-void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
- X64OperandGenerator g(this);
- // TODO(dcarney): other modes
- Emit(kX64Int64ToInt32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
- X64OperandGenerator g(this);
- // TODO(dcarney): other modes
- Emit(kX64Int32ToInt64, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
FlagsContinuation* cont) {
VisitBinop(this, node, kX64Add32, cont);
}
}
+
+TEST(RunChangeInt32ToInt64P) {
+ if (kPointerSize < 8) return;
+ int64_t actual = -1;
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ m.StoreToPointer(&actual, kMachInt64, m.ChangeInt32ToInt64(m.Parameter(0)));
+ m.Return(m.Int32Constant(0));
+ FOR_INT32_INPUTS(i) {
+ int64_t expected = *i;
+ CHECK_EQ(0, m.Call(*i));
+ CHECK_EQ(expected, actual);
+ }
+}
+
+
+TEST(RunChangeUint32ToUint64P) {
+ if (kPointerSize < 8) return;
+ int64_t actual = -1;
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ m.StoreToPointer(&actual, kMachUint64,
+ m.ChangeUint32ToUint64(m.Parameter(0)));
+ m.Return(m.Int32Constant(0));
+ FOR_UINT32_INPUTS(i) {
+ int64_t expected = static_cast<uint64_t>(*i);
+ CHECK_EQ(0, m.Call(*i));
+ CHECK_EQ(expected, actual);
+ }
+}
+
+
+TEST(RunTruncateInt64ToInt32P) {
+ if (kPointerSize < 8) return;
+ int64_t expected = -1;
+ RawMachineAssemblerTester<int32_t> m;
+ m.Return(m.TruncateInt64ToInt32(m.LoadFromPointer(&expected, kMachInt64)));
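+ // Compose a 64-bit value from two 32-bit halves; only the low half
+ // should survive the truncation.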
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
+ expected = (static_cast<uint64_t>(*j) << 32) | *i;
+ CHECK_UINT32_EQ(expected, m.Call());
+ }
+ }
+}
+
#endif // V8_TURBOFAN_TARGET
#include "test/compiler-unittests/instruction-selector-unittest.h"
-#include "test/cctest/compiler/instruction-selector-tester.h"
-
namespace v8 {
namespace internal {
namespace compiler {
}
}
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
+ StreamBuilder m(this, kMachInt64, kMachInt32);
+ m.Return(m.ChangeInt32ToInt64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Sxtw, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
+ StreamBuilder m(this, kMachUint64, kMachUint32);
+ m.Return(m.ChangeUint32ToUint64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Mov32, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt64);
+ m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Mov32, s[0]->arch_opcode());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(),
- IsWord64Shl(val, IsInt32Constant(SmiShiftAmount())));
+ IsWord64Shl(IsChangeInt32ToInt64(val),
+ IsInt32Constant(SmiShiftAmount())));
}
IsPhi(
IsLoad(kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
IsControlEffect(CaptureEq(&if_true))),
- IsChangeInt32ToFloat64(IsConvertInt64ToInt32(
+ IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(
IsWord64Sar(val, IsInt32Constant(SmiShiftAmount())))),
IsMerge(
AllOf(CaptureEq(&if_true),
IsPhi(IsChangeFloat64ToInt32(IsLoad(
kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
IsControlEffect(CaptureEq(&if_true)))),
- IsConvertInt64ToInt32(
+ IsTruncateInt64ToInt32(
IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
IsIfFalse(AllOf(
'graph-unittest.h',
'instruction-selector-unittest.cc',
'machine-operator-reducer-unittest.cc',
+ 'machine-operator-unittest.cc',
],
'conditions': [
['v8_target_arch=="arm"', {
'ia32/instruction-selector-ia32-unittest.cc',
],
}],
+ ['v8_target_arch=="x64"', {
+ 'sources': [ ### gcmole(arch:x64) ###
+ 'x64/instruction-selector-x64-unittest.cc',
+ ],
+ }],
['component=="shared_library"', {
# compiler-unittests can't be built against a shared library, so we
# need to depend on the underlying static target in that case.
Zone zone_;
};
+
+template <typename T>
+class CompilerTestWithParam : public CompilerTest,
+ public ::testing::WithParamInterface<T> {};
+
} // namespace compiler
} // namespace internal
} // namespace v8
Matcher<Node*> Is##Name(const Matcher<Node*>& input_matcher) { \
return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
}
-IS_UNOP_MATCHER(ConvertInt64ToInt32)
IS_UNOP_MATCHER(ChangeFloat64ToInt32)
IS_UNOP_MATCHER(ChangeInt32ToFloat64)
+IS_UNOP_MATCHER(ChangeInt32ToInt64)
+IS_UNOP_MATCHER(ChangeUint32ToUint64)
+IS_UNOP_MATCHER(TruncateInt64ToInt32)
#undef IS_UNOP_MATCHER
} // namespace compiler
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt32AddWithOverflow(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsConvertInt64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeInt32ToInt64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
} // namespace compiler
} // namespace internal
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/operator-properties-inl.h"
+#include "test/compiler-unittests/compiler-unittests.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::IsNull;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class MachineOperatorCommonTest : public CompilerTestWithParam<MachineType> {
+ public:
+ MachineOperatorCommonTest() : machine_(NULL) {}
+ virtual ~MachineOperatorCommonTest() { EXPECT_THAT(machine_, IsNull()); }
+
+ virtual void SetUp() V8_OVERRIDE {
+ CompilerTestWithParam::SetUp();
+ machine_ = new MachineOperatorBuilder(zone(), GetParam());
+ }
+
+ virtual void TearDown() V8_OVERRIDE {
+ delete machine_;
+ machine_ = NULL;
+ CompilerTestWithParam::TearDown();
+ }
+
+ protected:
+ MachineOperatorBuilder* machine() const { return machine_; }
+
+ private:
+ MachineOperatorBuilder* machine_;
+};
+
+
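+// The conversion operators are pure: one value input, one value output,
+// and no effect or control edges.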
+TEST_P(MachineOperatorCommonTest, ChangeInt32ToInt64) {
+ Operator* op = machine()->ChangeInt32ToInt64();
+ EXPECT_EQ(1, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+}
+
+
+TEST_P(MachineOperatorCommonTest, ChangeUint32ToUint64) {
+ Operator* op = machine()->ChangeUint32ToUint64();
+ EXPECT_EQ(1, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+}
+
+
+TEST_P(MachineOperatorCommonTest, TruncateInt64ToInt32) {
+ Operator* op = machine()->TruncateInt64ToInt32();
+ EXPECT_EQ(1, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+}
+
+
+INSTANTIATE_TEST_CASE_P(MachineOperatorTest, MachineOperatorCommonTest,
+ ::testing::Values(kRepWord32, kRepWord64));
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/compiler-unittests/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
+ StreamBuilder m(this, kMachInt64, kMachInt32);
+ m.Return(m.ChangeInt32ToInt64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
+ StreamBuilder m(this, kMachUint64, kMachUint32);
+ m.Return(m.ChangeUint32ToUint64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt64);
+ m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8