case kArmVsqrtF64:
__ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
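+ // The vrint* instructions below require ARMv8; the instruction selector
+ // only emits these opcodes when ARMv8 is supported (see the DCHECKs in the
+ // corresponding Visit* functions).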
+ case kArmVfloorF64:
+ __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
+ case kArmVceilF64:
+ __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
+ case kArmVroundTruncateF64:
+ __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
+ case kArmVroundTiesAwayF64:
+ __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
case kArmVnegF64:
__ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
V(ArmVmodF64) \
V(ArmVnegF64) \
V(ArmVsqrtF64) \
+ V(ArmVfloorF64) \
+ V(ArmVceilF64) \
+ V(ArmVroundTruncateF64) \
+ V(ArmVroundTiesAwayF64) \
V(ArmVcvtF32F64) \
V(ArmVcvtF64F32) \
V(ArmVcvtF64S32) \
case kArmVmodF64:
case kArmVnegF64:
case kArmVsqrtF64:
+ case kArmVfloorF64:
+ case kArmVceilF64:
+ case kArmVroundTruncateF64:
+ case kArmVroundTiesAwayF64:
case kArmVcvtF32F64:
case kArmVcvtF64F32:
case kArmVcvtF64S32:
};
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ ArmOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
ArmOperandGenerator g(selector);
}
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ VisitRRFloat64(this, kArmVfloorF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ VisitRRFloat64(this, kArmVceilF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ VisitRRFloat64(this, kArmVroundTruncateF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ VisitRRFloat64(this, kArmVroundTiesAwayF64, node);
+}
+
+
void InstructionSelector::VisitCall(Node* node) {
ArmOperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kInt32DivIsSafe |
- MachineOperatorBuilder::kInt32ModIsSafe |
- MachineOperatorBuilder::kUint32DivIsSafe |
- MachineOperatorBuilder::kUint32ModIsSafe;
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kInt32ModIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kUint32ModIsSafe;
+
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ flags |= MachineOperatorBuilder::kFloat64Floor |
+ MachineOperatorBuilder::kFloat64Ceil |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesAway;
+ }
+ return flags;
}
} // namespace compiler
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
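+ // All ARM64 targets provide the FRINT* rounding instructions, so no
+ // feature check is needed for the cases below.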
+ case kArm64Float64Ceil:
+ __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kArm64Float64Floor:
+ __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kArm64Float64RoundTruncate:
+ __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kArm64Float64RoundTiesAway:
+ __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64Add:
__ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
break;
V(Arm64Float64Div) \
V(Arm64Float64Mod) \
V(Arm64Float64Sqrt) \
+ V(Arm64Float64Floor) \
+ V(Arm64Float64Ceil) \
+ V(Arm64Float64RoundTruncate) \
+ V(Arm64Float64RoundTiesAway) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float64ToInt32) \
};
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
Arm64OperandGenerator g(selector);
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- Arm64OperandGenerator g(this);
- Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRRFloat64(this, kArm64Float64Sqrt, node);
+}
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+ VisitRRFloat64(this, kArm64Float64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+ VisitRRFloat64(this, kArm64Float64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ VisitRRFloat64(this, kArm64Float64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ VisitRRFloat64(this, kArm64Float64RoundTiesAway, node);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kNoFlags;
+ return MachineOperatorBuilder::kFloat64Floor |
+ MachineOperatorBuilder::kFloat64Ceil |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesAway;
}
-
} // namespace compiler
} // namespace internal
} // namespace v8
case kSSEFloat64Sqrt:
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
+ case kSSEFloat64Floor: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::kRoundDown);
+ break;
+ }
+ case kSSEFloat64Ceil: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::kRoundUp);
+ break;
+ }
+ case kSSEFloat64RoundTruncate: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::kRoundToZero);
+ break;
+ }
case kSSECvtss2sd:
__ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
V(SSEFloat64Sqrt) \
+ V(SSEFloat64Floor) \
+ V(SSEFloat64Ceil) \
+ V(SSEFloat64RoundTruncate) \
V(SSECvtss2sd) \
V(SSECvtsd2ss) \
V(SSEFloat64ToInt32) \
};
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ IA32OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
}
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ VisitRRFloat64(this, kSSEFloat64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ VisitRRFloat64(this, kSSEFloat64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
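+ // SSE4.1's roundsd has no round-half-away-from-zero mode, so
+ // kFloat64RoundTiesAway is not advertised for ia32 and this visitor should
+ // never be reached.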
+ UNREACHABLE();
+}
+
+
void InstructionSelector::VisitCall(Node* node) {
IA32OperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kNoFlags;
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ return MachineOperatorBuilder::kFloat64Floor |
+ MachineOperatorBuilder::kFloat64Ceil |
+ MachineOperatorBuilder::kFloat64RoundTruncate;
+ }
+ return MachineOperatorBuilder::Flag::kNoFlags;
}
-
} // namespace compiler
} // namespace internal
} // namespace v8
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
case IrOpcode::kFloat64Sqrt:
+ case IrOpcode::kFloat64Floor:
+ case IrOpcode::kFloat64Ceil:
+ case IrOpcode::kFloat64RoundTruncate:
+ case IrOpcode::kFloat64RoundTiesAway:
return kMachFloat64;
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
return VisitFloat64LessThan(node);
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64LessThanOrEqual(node);
+ case IrOpcode::kFloat64Floor:
+ return MarkAsDouble(node), VisitFloat64Floor(node);
+ case IrOpcode::kFloat64Ceil:
+ return MarkAsDouble(node), VisitFloat64Ceil(node);
+ case IrOpcode::kFloat64RoundTruncate:
+ return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
+ case IrOpcode::kFloat64RoundTiesAway:
+ return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
case IrOpcode::kLoadStackPointer:
return VisitLoadStackPointer(node);
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
+ break;
}
}
}
+// ES6 draft 10-14-14, section 20.2.2.16.
+Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
+ if (!machine()->HasFloat64Floor()) return NoChange();
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.floor(a:number) -> Float64Floor(a)
+ Node* value = graph()->NewNode(machine()->Float64Floor(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
+// ES6 draft 10-14-14, section 20.2.2.10.
+Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
+ if (!machine()->HasFloat64Ceil()) return NoChange();
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.ceil(a:number) -> Float64Ceil(a)
+ Node* value = graph()->NewNode(machine()->Float64Ceil(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
Reduction JSBuiltinReducer::Reduce(Node* node) {
JSCallReduction r(node);
return ReplaceWithPureReduction(node, ReduceMathImul(node));
case kMathFround:
return ReplaceWithPureReduction(node, ReduceMathFround(node));
+ case kMathFloor:
+ return ReplaceWithPureReduction(node, ReduceMathFloor(node));
+ case kMathCeil:
+ return ReplaceWithPureReduction(node, ReduceMathCeil(node));
default:
break;
}
Reduction ReduceMathMax(Node* node);
Reduction ReduceMathImul(Node* node);
Reduction ReduceMathFround(Node* node);
+ Reduction ReduceMathFloor(Node* node);
+ Reduction ReduceMathCeil(Node* node);
JSGraph* jsgraph_;
SimplifiedOperatorBuilder simplified_;
}
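+// The Float64 rounding flags are not advertised for MIPS, so these visitors
+// should never be reached.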
+void InstructionSelector::VisitFloat64Floor(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ UNREACHABLE();
+}
+
+
void InstructionSelector::VisitCall(Node* node) {
MipsOperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
RawMachineAssembler::RawMachineAssembler(Graph* graph,
MachineSignature* machine_sig,
- MachineType word)
+ MachineType word,
+ MachineOperatorBuilder::Flags flags)
: GraphBuilder(graph),
schedule_(new (zone()) Schedule(zone())),
- machine_(word),
+ machine_(word, flags),
common_(zone()),
machine_sig_(machine_sig),
call_descriptor_(
};
RawMachineAssembler(Graph* graph, MachineSignature* machine_sig,
- MachineType word = kMachPtr);
+ MachineType word = kMachPtr,
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::Flag::kNoFlags);
virtual ~RawMachineAssembler() {}
Isolate* isolate() const { return zone()->isolate(); }
Node* TruncateInt64ToInt32(Node* a) {
return NewNode(machine()->TruncateInt64ToInt32(), a);
}
+ Node* Float64Floor(Node* a) { return NewNode(machine()->Float64Floor(), a); }
+ Node* Float64Ceil(Node* a) { return NewNode(machine()->Float64Ceil(), a); }
+ Node* Float64RoundTruncate(Node* a) {
+ return NewNode(machine()->Float64RoundTruncate(), a);
+ }
+ Node* Float64RoundTiesAway(Node* a) {
+ return NewNode(machine()->Float64RoundTiesAway(), a);
+ }
// Parameters.
Node* Parameter(size_t index);
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
+ case kSSEFloat64Floor: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::kRoundDown);
+ break;
+ }
+ case kSSEFloat64Ceil: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::kRoundUp);
+ break;
+ }
+ case kSSEFloat64RoundTruncate: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::kRoundToZero);
+ break;
+ }
case kSSECvtss2sd:
if (instr->InputAt(0)->IsDoubleRegister()) {
__ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
V(SSEFloat64Sqrt) \
+ V(SSEFloat64Floor) \
+ V(SSEFloat64Ceil) \
+ V(SSEFloat64RoundTruncate) \
V(SSECvtss2sd) \
V(SSECvtsd2ss) \
V(SSEFloat64ToInt32) \
};
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ X64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
}
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ VisitRRFloat64(this, kSSEFloat64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ VisitRRFloat64(this, kSSEFloat64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
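+ // As on ia32, SSE4.1 provides no ties-away rounding mode, so this visitor
+ // should never be reached on x64.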
+ UNREACHABLE();
+}
+
+
void InstructionSelector::VisitCall(Node* node) {
X64OperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ return MachineOperatorBuilder::kFloat64Floor |
+ MachineOperatorBuilder::kFloat64Ceil |
+ MachineOperatorBuilder::kFloat64RoundTruncate;
+ }
return MachineOperatorBuilder::kNoFlags;
}
-
} // namespace compiler
} // namespace internal
} // namespace v8
#include "src/v8.h"
+#include "src/compiler/instruction-selector.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/simulator.h"
public:
MachineAssemblerTester(MachineType return_type, MachineType p0,
MachineType p1, MachineType p2, MachineType p3,
- MachineType p4)
+ MachineType p4,
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::Flag::kNoFlags)
: HandleAndZoneScope(),
CallHelper(
main_isolate(),
MachineAssembler(
new (main_zone()) Graph(main_zone()),
MakeMachineSignature(main_zone(), return_type, p0, p1, p2, p3, p4),
- kMachPtr) {}
+ kMachPtr, flags) {}
Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
return this->Load(rep, this->PointerConstant(address),
MachineType p3 = kMachNone,
MachineType p4 = kMachNone)
: MachineAssemblerTester<RawMachineAssembler>(
- ReturnValueTraits<ReturnType>::Representation(), p0, p1, p2, p3,
- p4) {}
+ ReturnValueTraits<ReturnType>::Representation(), p0, p1, p2, p3, p4,
+ InstructionSelector::SupportedMachineOperatorFlags()) {}
template <typename Ci, typename Fn>
void Run(const Ci& ci, const Fn& fn) {
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <cmath>
#include <functional>
#include <limits>
}
}
+
+static double two_30 = 1 << 30; // 2^30 is a smi boundary.
+static double two_52 = two_30 * (1 << 22); // 2^52 is a precision boundary.
+static double kValues[] = {0.1,
+ 0.2,
+ 0.49999999999999994,
+ 0.5,
+ 0.7,
+ 1.0 - std::numeric_limits<double>::epsilon(),
+ -0.1,
+ -0.49999999999999994,
+ -0.5,
+ -0.7,
+ 1.1,
+ 1.0 + std::numeric_limits<double>::epsilon(),
+ 1.5,
+ 1.7,
+ -1,
+ -1 + std::numeric_limits<double>::epsilon(),
+ -1 - std::numeric_limits<double>::epsilon(),
+ -1.1,
+ -1.5,
+ -1.7,
+ std::numeric_limits<double>::min(),
+ -std::numeric_limits<double>::min(),
+ std::numeric_limits<double>::max(),
+ -std::numeric_limits<double>::max(),
+ std::numeric_limits<double>::infinity(),
+ -std::numeric_limits<double>::infinity(),
+ two_30,
+ two_30 + 0.1,
+ two_30 + 0.5,
+ two_30 + 0.7,
+ two_30 - 1,
+ two_30 - 1 + 0.1,
+ two_30 - 1 + 0.5,
+ two_30 - 1 + 0.7,
+ -two_30,
+ -two_30 + 0.1,
+ -two_30 + 0.5,
+ -two_30 + 0.7,
+ -two_30 + 1,
+ -two_30 + 1 + 0.1,
+ -two_30 + 1 + 0.5,
+ -two_30 + 1 + 0.7,
+ two_52,
+ two_52 + 0.1,
+ two_52 + 0.5,
+ two_52 + 0.5,
+ two_52 + 0.7,
+ two_52 + 0.7,
+ two_52 - 1,
+ two_52 - 1 + 0.1,
+ two_52 - 1 + 0.5,
+ two_52 - 1 + 0.7,
+ -two_52,
+ -two_52 + 0.1,
+ -two_52 + 0.5,
+ -two_52 + 0.7,
+ -two_52 + 1,
+ -two_52 + 1 + 0.1,
+ -two_52 + 1 + 0.5,
+ -two_52 + 1 + 0.7,
+ two_30,
+ two_30 - 0.1,
+ two_30 - 0.5,
+ two_30 - 0.7,
+ two_30 - 1,
+ two_30 - 1 - 0.1,
+ two_30 - 1 - 0.5,
+ two_30 - 1 - 0.7,
+ -two_30,
+ -two_30 - 0.1,
+ -two_30 - 0.5,
+ -two_30 - 0.7,
+ -two_30 + 1,
+ -two_30 + 1 - 0.1,
+ -two_30 + 1 - 0.5,
+ -two_30 + 1 - 0.7,
+ two_52,
+ two_52 - 0.1,
+ two_52 - 0.5,
+ two_52 - 0.5,
+ two_52 - 0.7,
+ two_52 - 0.7,
+ two_52 - 1,
+ two_52 - 1 - 0.1,
+ two_52 - 1 - 0.5,
+ two_52 - 1 - 0.7,
+ -two_52,
+ -two_52 - 0.1,
+ -two_52 - 0.5,
+ -two_52 - 0.7,
+ -two_52 + 1,
+ -two_52 + 1 - 0.1,
+ -two_52 + 1 - 0.5,
+ -two_52 + 1 - 0.7};
+
+
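+// Each rounding test below builds a graph that loads a double from |input|,
+// applies the rounding operator, and stores the result to |result|; the
+// generated code is then invoked once per entry in kValues.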
+TEST(RunFloat64Floor) {
+ double input = -1.0;
+ double result = 0.0;
+ RawMachineAssemblerTester<int32_t> m;
+ if (!m.machine()->HasFloat64Floor()) return;
+ m.StoreToPointer(&result, kMachFloat64,
+ m.Float64Floor(m.LoadFromPointer(&input, kMachFloat64)));
+ m.Return(m.Int32Constant(0));
+ for (size_t i = 0; i < arraysize(kValues); ++i) {
+ input = kValues[i];
+ CHECK_EQ(0, m.Call());
+ double expected = std::floor(kValues[i]);
+ CHECK_EQ(expected, result);
+ }
+}
+
+
+TEST(RunFloat64Ceil) {
+ double input = -1.0;
+ double result = 0.0;
+ RawMachineAssemblerTester<int32_t> m;
+ if (!m.machine()->HasFloat64Ceil()) return;
+ m.StoreToPointer(&result, kMachFloat64,
+ m.Float64Ceil(m.LoadFromPointer(&input, kMachFloat64)));
+ m.Return(m.Int32Constant(0));
+ for (size_t i = 0; i < arraysize(kValues); ++i) {
+ input = kValues[i];
+ CHECK_EQ(0, m.Call());
+ double expected = std::ceil(kValues[i]);
+ CHECK_EQ(expected, result);
+ }
+}
+
+
+TEST(RunFloat64RoundTruncate) {
+ double input = -1.0;
+ double result = 0.0;
+ RawMachineAssemblerTester<int32_t> m;
+ if (!m.machine()->HasFloat64RoundTruncate()) return;
+ m.StoreToPointer(
+ &result, kMachFloat64,
+ m.Float64RoundTruncate(m.LoadFromPointer(&input, kMachFloat64)));
+ m.Return(m.Int32Constant(0));
+ for (size_t i = 0; i < arraysize(kValues); ++i) {
+ input = kValues[i];
+ CHECK_EQ(0, m.Call());
+ double expected = std::trunc(kValues[i]);
+ CHECK_EQ(expected, result);
+ }
+}
+
+
+TEST(RunFloat64RoundTiesAway) {
+ double input = -1.0;
+ double result = 0.0;
+ RawMachineAssemblerTester<int32_t> m;
+ if (!m.machine()->HasFloat64RoundTiesAway()) return;
+ m.StoreToPointer(
+ &result, kMachFloat64,
+ m.Float64RoundTiesAway(m.LoadFromPointer(&input, kMachFloat64)));
+ m.Return(m.Int32Constant(0));
+ for (size_t i = 0; i < arraysize(kValues); ++i) {
+ input = kValues[i];
+ CHECK_EQ(0, m.Call());
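+ // std::round rounds halfway cases away from zero, which matches the
+ // RoundTiesAway semantics under test.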
+ double expected = std::round(kValues[i]);
+ CHECK_EQ(expected, result);
+ }
+}
#endif // V8_TURBOFAN_TARGET
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib) {
+ "use asm";
+
+ var ceil = stdlib.Math.ceil;
+
+ // f: double -> double
+ function f(a) {
+ a = +a;
+ return ceil(a);
+ }
+
+ return { f: f };
+}
+
+var f = Module({ Math: Math }).f;
+
+assertTrue(isNaN(f(NaN)));
+assertTrue(isNaN(f(undefined)));
+assertTrue(isNaN(f(function() {})));
+
+assertEquals(0, f(0));
+assertEquals(+0, f(+0));
+assertEquals(-0, f(-0));
+assertEquals(1, f(0.49999));
+assertEquals(1, f(0.6));
+assertEquals(1, f(0.5));
+assertEquals(-0, f(-0.1));
+assertEquals(-0, f(-0.5));
+assertEquals(-0, f(-0.6));
+assertEquals(-1, f(-1.6));
+assertEquals(-0, f(-0.50001));
+
+assertEquals("Infinity", String(f(Infinity)));
+assertEquals("-Infinity", String(f(-Infinity)));
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib) {
+ "use asm";
+
+ var floor = stdlib.Math.floor;
+
+ // f: double -> double
+ function f(a) {
+ a = +a;
+ return floor(a);
+ }
+
+ return { f: f };
+}
+
+var f = Module({ Math: Math }).f;
+
+assertTrue(isNaN(f(NaN)));
+assertTrue(isNaN(f(undefined)));
+assertTrue(isNaN(f(function() {})));
+
+assertEquals(0, f(0));
+assertEquals(+0, f(+0));
+assertEquals(-0, f(-0));
+assertEquals(0, f(0.49999));
+assertEquals(+0, f(0.6));
+assertEquals(+0, f(0.5));
+assertEquals(-1, f(-0.1));
+assertEquals(-1, f(-0.5));
+assertEquals(-1, f(-0.6));
+assertEquals(-2, f(-1.6));
+assertEquals(-1, f(-0.50001));
+
+assertEquals("Infinity", String(f(Infinity)));
+assertEquals("-Infinity", String(f(-Infinity)));
JSBuiltinReducerTest() : javascript_(zone()) {}
protected:
- Reduction Reduce(Node* node) {
- MachineOperatorBuilder machine;
+ Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::Flag::kNoFlags) {
+ MachineOperatorBuilder machine(kMachPtr, flags);
JSGraph jsgraph(graph(), common(), javascript(), &machine);
JSBuiltinReducer reducer(&jsgraph);
return reducer.Reduce(node);
}
}
+
+// -----------------------------------------------------------------------------
+// Math.floor
+
+
+TEST_F(JSBuiltinReducerTest, MathFloorAvailable) {
+ Handle<JSFunction> f = MathFunction("floor");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Floor);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Floor(p0));
+ }
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathFloorUnavailable) {
+ Handle<JSFunction> f = MathFunction("floor");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);
+
+ ASSERT_FALSE(r.Changed());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.ceil
+
+
+TEST_F(JSBuiltinReducerTest, MathCeilAvailable) {
+ Handle<JSFunction> f = MathFunction("ceil");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Ceil);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Ceil(p0));
+ }
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathCeilUnavailable) {
+ Handle<JSFunction> f = MathFunction("ceil");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);
+
+ ASSERT_FALSE(r.Changed());
+ }
+}
} // namespace compiler
} // namespace internal
} // namespace v8
IS_UNOP_MATCHER(TruncateFloat64ToInt32)
IS_UNOP_MATCHER(TruncateInt64ToInt32)
IS_UNOP_MATCHER(Float64Sqrt)
+IS_UNOP_MATCHER(Float64Floor)
+IS_UNOP_MATCHER(Float64Ceil)
+IS_UNOP_MATCHER(Float64RoundTruncate)
+IS_UNOP_MATCHER(Float64RoundTiesAway)
#undef IS_UNOP_MATCHER
} // namespace compiler
Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Floor(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Ceil(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64RoundTiesAway(const Matcher<Node*>& input_matcher);
} // namespace compiler
} // namespace internal