cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
kArithmeticImm);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+ kArithmeticImm);
case IrOpcode::kFloat32Equal:
cont.OverwriteAndNegateIfEqual(kEqual);
return VisitFloat32Compare(this, value, &cont);
}
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
+}
+
+
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont(kEqual, node);
VisitFloat32Compare(this, node, &cont);
return MarkAsWord64(node), VisitUint64Div(node);
case IrOpcode::kUint64LessThan:
return VisitUint64LessThan(node);
+ case IrOpcode::kUint64LessThanOrEqual:
+ return VisitUint64LessThanOrEqual(node);
case IrOpcode::kUint64Mod:
return MarkAsWord64(node), VisitUint64Mod(node);
case IrOpcode::kChangeFloat32ToFloat64:
#endif // V8_TURBOFAN_BACKEND
-// 32 bit targets do not implement the following instructions.
-#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
+// Targets without a 64-bit TurboFan backend do not implement the following
+// instructions.
+#if V8_TURBOFAN_BACKEND && !V8_TURBOFAN_BACKEND_64
void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int64Div, Operator::kNoProperties, 2, 0, 1) \
- V(Int64Mod, Operator::kNoProperties, 2, 0, 1) \
+ V(Int64Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Int64Mod, Operator::kNoProperties, 2, 1, 1) \
V(Int64LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint64Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint64Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Uint64Mod, Operator::kNoProperties, 2, 1, 1) \
V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint64Mod, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
const Operator* Int64LessThanOrEqual();
const Operator* Uint64Div();
const Operator* Uint64LessThan();
+ const Operator* Uint64LessThanOrEqual();
const Operator* Uint64Mod();
// These operators change the representation of numbers while preserving the
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat32Compare(selector, value, cont);
}
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont(kEqual, node);
VisitFloat32Compare(this, node, &cont);
V(Int64LessThan) \
V(Int64LessThanOrEqual) \
V(Uint64LessThan) \
+ V(Uint64LessThanOrEqual) \
V(Float32Equal) \
V(Float32LessThan) \
V(Float32LessThanOrEqual) \
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
#endif
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
}
void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}
+
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWord64Compare(this, node, &cont);
+}
#endif
}
+Bounds Typer::Visitor::TypeUint64LessThanOrEqual(Node* node) {
+ return Bounds(Type::Boolean());
+}
+
+
Bounds Typer::Visitor::TypeUint64Mod(Node* node) {
return Bounds(Type::Internal());
}
case IrOpcode::kUint64Div:
case IrOpcode::kUint64Mod:
case IrOpcode::kUint64LessThan:
+ case IrOpcode::kUint64LessThanOrEqual:
case IrOpcode::kFloat32Add:
case IrOpcode::kFloat32Sub:
case IrOpcode::kFloat32Mul:
case IrOpcode::kUint64LessThan:
cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(this, value, &cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, &cont);
case IrOpcode::kFloat32Equal:
cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat32Compare(this, value, &cont);
}
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont(kUnorderedEqual, node);
VisitFloat32Compare(this, node, &cont);
#if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_X87
+
#define V8_TURBOFAN_BACKEND 1
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS64 || \
+ V8_TARGET_ARCH_PPC64
+// 64-bit TurboFan backends support 64-bit integer arithmetic.
+#define V8_TURBOFAN_BACKEND_64 1
+#else
+#define V8_TURBOFAN_BACKEND_64 0
+#endif
+
#else
#define V8_TURBOFAN_BACKEND 0
+#define V8_TURBOFAN_BACKEND_64 0
#endif
+
#if V8_TURBOFAN_BACKEND
#define V8_TURBOFAN_TARGET 1
#else
}
+#if V8_TURBOFAN_BACKEND_64
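+// Helper that returns the |index|-th input for a 64-bit binop test: one of
+// the two parameters, a few representative constants, or a load (never
+// executed, since the generated code is not run).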
+static Node* Int64Input(RawMachineAssemblerTester<int64_t>* m, int index) {
+ switch (index) {
+ case 0:
+ return m->Parameter(0);
+ case 1:
+ return m->Parameter(1);
+ case 2:
+ return m->Int64Constant(0);
+ case 3:
+ return m->Int64Constant(1);
+ case 4:
+ return m->Int64Constant(-1);
+ case 5:
+ return m->Int64Constant(0xff);
+ case 6:
+ return m->Int64Constant(0x0123456789abcdefLL);
+ case 7:
+ return m->Load(kMachInt64, m->PointerConstant(NULL));
+ default:
+ return NULL;
+ }
+}
+
+
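+// Generates (but does not execute) code for every 64-bit binary operator,
+// feeding each pairing of the inputs produced by Int64Input().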
+TEST(CodeGenInt64Binop) {
+ RawMachineAssemblerTester<void> m;
+
+ const Operator* kOps[] = {
+ m.machine()->Word64And(), m.machine()->Word64Or(),
+ m.machine()->Word64Xor(), m.machine()->Word64Shl(),
+ m.machine()->Word64Shr(), m.machine()->Word64Sar(),
+ m.machine()->Word64Equal(), m.machine()->Int64Add(),
+ m.machine()->Int64Sub(), m.machine()->Int64Mul(), m.machine()->Int64Div(),
+ m.machine()->Uint64Div(), m.machine()->Int64Mod(),
+ m.machine()->Uint64Mod(), m.machine()->Int64LessThan(),
+ m.machine()->Int64LessThanOrEqual(), m.machine()->Uint64LessThan(),
+ m.machine()->Uint64LessThanOrEqual()};
+
+ for (size_t i = 0; i < arraysize(kOps); ++i) {
+ for (int j = 0; j < 8; j++) {
+ for (int k = 0; k < 8; k++) {
+ RawMachineAssemblerTester<int64_t> m(kMachInt64, kMachInt64);
+ Node* a = Int64Input(&m, j);
+ Node* b = Int64Input(&m, k);
+ m.Return(m.NewNode(kOps[i], a, b));
+ m.GenerateCode();
+ }
+ }
+ }
+}
+
+
+// TODO(titzer): add tests that run 64-bit integer operations.
+#endif // V8_TURBOFAN_BACKEND_64
+
+
TEST(RunGoto) {
RawMachineAssemblerTester<int32_t> m;
int constant = 99999;
PURE(Int64Add, 2, 0, 1), // --
PURE(Int64Sub, 2, 0, 1), // --
PURE(Int64Mul, 2, 0, 1), // --
- PURE(Int64Div, 2, 0, 1), // --
- PURE(Uint64Div, 2, 0, 1), // --
- PURE(Int64Mod, 2, 0, 1), // --
- PURE(Uint64Mod, 2, 0, 1), // --
+ PURE(Int64Div, 2, 1, 1), // --
+ PURE(Uint64Div, 2, 1, 1), // --
+ PURE(Int64Mod, 2, 1, 1), // --
+ PURE(Uint64Mod, 2, 1, 1), // --
PURE(Int64LessThan, 2, 0, 1), // --
PURE(Int64LessThanOrEqual, 2, 0, 1), // --
PURE(Uint64LessThan, 2, 0, 1), // --
+ PURE(Uint64LessThanOrEqual, 2, 0, 1), // --
PURE(ChangeFloat32ToFloat64, 1, 0, 1), // --
PURE(ChangeFloat64ToInt32, 1, 0, 1), // --
PURE(ChangeFloat64ToUint32, 1, 0, 1), // --
Node* Int64LessThan(Node* a, Node* b) {
return NewNode(machine()->Int64LessThan(), a, b);
}
+ Node* Int64LessThanOrEqual(Node* a, Node* b) {
+ return NewNode(machine()->Int64LessThanOrEqual(), a, b);
+ }
Node* Uint64LessThan(Node* a, Node* b) {
return NewNode(machine()->Uint64LessThan(), a, b);
}
- Node* Int64LessThanOrEqual(Node* a, Node* b) {
- return NewNode(machine()->Int64LessThanOrEqual(), a, b);
+ Node* Uint64LessThanOrEqual(Node* a, Node* b) {
+ return NewNode(machine()->Uint64LessThanOrEqual(), a, b);
}
Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
Node* Int64GreaterThanOrEqual(Node* a, Node* b) {