case kArmVsqrtF64:
__ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
- case kArmVfloorF64:
+ case kArmVrintmF64:
__ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
- case kArmVceilF64:
+ case kArmVrintpF64:
__ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
- case kArmVroundTruncateF64:
+ case kArmVrintzF64:
__ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
- case kArmVroundTiesAwayF64:
+ case kArmVrintaF64:
__ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
case kArmVnegF64:
V(ArmVmodF64) \
V(ArmVnegF64) \
V(ArmVsqrtF64) \
- V(ArmVfloorF64) \
- V(ArmVceilF64) \
- V(ArmVroundTruncateF64) \
- V(ArmVroundTiesAwayF64) \
+ V(ArmVrintmF64) \
+ V(ArmVrintpF64) \
+ V(ArmVrintzF64) \
+ V(ArmVrintaF64) \
V(ArmVcvtF32F64) \
V(ArmVcvtF64F32) \
V(ArmVcvtF64S32) \
ArmOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero()) {
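+    // Match -0.0 - Float64RoundDown(-0.0 - x) (e.g. Math.ceil(x), which
+    // math.js expands as -Math.floor(-x)) and select a single vrintp
+    // (round toward +infinity) for it.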
+ if (m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kArmVrintpF64, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
Emit(kArmVnegF64, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
return;
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRRFloat64(this, kArmVfloorF64, node);
-}
-
-
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRRFloat64(this, kArmVceilF64, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRRFloat64(this, kArmVrintmF64, node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRRFloat64(this, kArmVroundTruncateF64, node);
+ VisitRRFloat64(this, kArmVrintzF64, node);
}
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRRFloat64(this, kArmVroundTiesAwayF64, node);
+ VisitRRFloat64(this, kArmVrintaF64, node);
}
MachineOperatorBuilder::kUint32DivIsSafe;
if (CpuFeatures::IsSupported(ARMv8)) {
- flags |= MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
+ flags |= MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway;
}
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
- case kArm64Float64Ceil:
- __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- break;
- case kArm64Float64Floor:
+ case kArm64Float64RoundDown:
__ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kArm64Float64RoundTiesAway:
+ __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64Float64RoundTruncate:
__ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kArm64Float64RoundTiesAway:
- __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ case kArm64Float64RoundUp:
+ __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArm64Add:
__ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
V(Arm64Float64Div) \
V(Arm64Float64Mod) \
V(Arm64Float64Sqrt) \
- V(Arm64Float64Floor) \
- V(Arm64Float64Ceil) \
- V(Arm64Float64RoundTruncate) \
+ V(Arm64Float64RoundDown) \
V(Arm64Float64RoundTiesAway) \
+ V(Arm64Float64RoundTruncate) \
+ V(Arm64Float64RoundUp) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float64ToInt32) \
void InstructionSelector::VisitFloat64Sub(Node* node) {
+ Arm64OperandGenerator g(this);
+ Float64BinopMatcher m(node);
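+  // -0.0 - Float64RoundDown(-0.0 - x) rounds x toward +infinity; select a
+  // single frintp for it.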
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
VisitRRRFloat64(this, kArm64Float64Sub, node);
}
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- VisitRRFloat64(this, kArm64Float64Floor, node);
-}
-
-
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- VisitRRFloat64(this, kArm64Float64Ceil, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRRFloat64(this, kArm64Float64RoundDown, node);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
+ return MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kWord32ShiftIsSafe |
case kSSEFloat64Sqrt:
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat64Floor: {
+ case kSSEFloat64Round: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundDown);
- break;
- }
- case kSSEFloat64Ceil: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundUp);
- break;
- }
- case kSSEFloat64RoundTruncate: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundToZero);
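+      // The rounding mode is carried in the MiscField of the instruction
+      // code, where the instruction selector encoded it.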
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
case kSSECvtss2sd:
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
V(SSEFloat64Sqrt) \
- V(SSEFloat64Floor) \
- V(SSEFloat64Ceil) \
- V(SSEFloat64RoundTruncate) \
+ V(SSEFloat64Round) \
V(SSECvtss2sd) \
V(SSECvtsd2ss) \
V(SSEFloat64ToInt32) \
};
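+// Takes an InstructionCode rather than an ArchOpcode so that callers can OR
+// in a MiscField-encoded rounding mode (e.g. for kSSEFloat64Round).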
-static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
+static void VisitRRFloat64(InstructionSelector* selector,
+ InstructionCode opcode, Node* node) {
IA32OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
void InstructionSelector::VisitFloat64Sub(Node* node) {
IA32OperandGenerator g(this);
+ Float64BinopMatcher m(node);
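+  // -0.0 - Float64RoundDown(-0.0 - x) rounds x toward +infinity; select
+  // kSSEFloat64Round with the kRoundUp mode for it.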
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
+ g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
if (IsSupported(AVX)) {
Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64Floor, node);
-}
-
-
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64Ceil, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundDown), node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
+ VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundToZero),
+ node);
}
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kWord32ShiftIsSafe;
if (CpuFeatures::IsSupported(SSE4_1)) {
- flags |= MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
+ flags |= MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate;
}
return flags;
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
case IrOpcode::kFloat64Sqrt:
- case IrOpcode::kFloat64Floor:
- case IrOpcode::kFloat64Ceil:
+ case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
return kMachFloat64;
return VisitFloat64LessThan(node);
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64LessThanOrEqual(node);
- case IrOpcode::kFloat64Floor:
- return MarkAsDouble(node), VisitFloat64Floor(node);
- case IrOpcode::kFloat64Ceil:
- return MarkAsDouble(node), VisitFloat64Ceil(node);
+ case IrOpcode::kFloat64RoundDown:
+ return MarkAsDouble(node), VisitFloat64RoundDown(node);
case IrOpcode::kFloat64RoundTruncate:
return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
case IrOpcode::kFloat64RoundTiesAway:
// ES6 draft 10-14-14, section 20.2.2.16.
Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
- if (!machine()->HasFloat64Floor()) return NoChange();
+ if (!machine()->HasFloat64RoundDown()) return NoChange();
JSCallReduction r(node);
if (r.InputsMatchOne(Type::Number())) {
- // Math.floor(a:number) -> Float64Floor(a)
- Node* value = graph()->NewNode(machine()->Float64Floor(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-
-// ES6 draft 10-14-14, section 20.2.2.10.
-Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
- if (!machine()->HasFloat64Ceil()) return NoChange();
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.ceil(a:number) -> Float64Ceil(a)
- Node* value = graph()->NewNode(machine()->Float64Ceil(), r.left());
+ // Math.floor(a:number) -> Float64RoundDown(a)
+ Node* value = graph()->NewNode(machine()->Float64RoundDown(), r.left());
return Replace(value);
}
return NoChange();
return ReplaceWithPureReduction(node, ReduceMathFround(node));
case kMathFloor:
return ReplaceWithPureReduction(node, ReduceMathFloor(node));
- case kMathCeil:
- return ReplaceWithPureReduction(node, ReduceMathCeil(node));
default:
break;
}
Reduction ReduceMathImul(Node* node);
Reduction ReduceMathFround(Node* node);
Reduction ReduceMathFloor(Node* node);
- Reduction ReduceMathCeil(Node* node);
JSGraph* jsgraph() const { return jsgraph_; }
Graph* graph() const;
return ReduceInlineDoubleHi(node);
case Runtime::kInlineIsRegExp:
return ReduceInlineIsInstanceType(node, JS_REGEXP_TYPE);
+ case Runtime::kInlineMathFloor:
+ return ReduceInlineMathFloor(node);
case Runtime::kInlineValueOf:
return ReduceInlineValueOf(node);
default:
}
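+// %_MathFloor(x) lowers to the Float64RoundDown machine operator when the
+// target supports it; otherwise NoChange() leaves the intrinsic call intact.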
+Reduction JSIntrinsicLowering::ReduceInlineMathFloor(Node* node) {
+ if (!machine()->HasFloat64RoundDown()) return NoChange();
+ return Change(node, machine()->Float64RoundDown());
+}
+
+
Reduction JSIntrinsicLowering::ReduceInlineValueOf(Node* node) {
// if (%_IsSmi(value)) {
// return value;
Reduction ReduceInlineConstructDouble(Node* node);
Reduction ReduceInlineDoubleLo(Node* node);
Reduction ReduceInlineDoubleHi(Node* node);
+ Reduction ReduceInlineMathFloor(Node* node);
Reduction ReduceInlineValueOf(Node* node);
Reduction Change(Node* node, const Operator* op);
V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Ceil, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Floor, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundDown, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
V(Float64Equal, Operator::kCommutative, 2, 0, 1) \
// for operations that are unsupported by some back-ends.
enum Flag {
kNoFlags = 0u,
- kFloat64Floor = 1u << 0,
- kFloat64Ceil = 1u << 1,
- kFloat64RoundTruncate = 1u << 2,
- kFloat64RoundTiesAway = 1u << 3,
- kInt32DivIsSafe = 1u << 4,
- kUint32DivIsSafe = 1u << 5,
- kWord32ShiftIsSafe = 1u << 6
+ kFloat64RoundDown = 1u << 0,
+ kFloat64RoundTruncate = 1u << 1,
+ kFloat64RoundTiesAway = 1u << 2,
+ kInt32DivIsSafe = 1u << 3,
+ kUint32DivIsSafe = 1u << 4,
+ kWord32ShiftIsSafe = 1u << 5
};
typedef base::Flags<Flag, unsigned> Flags;
const Operator* Float64LessThanOrEqual();
// Floating point rounding.
- const Operator* Float64Floor();
- const Operator* Float64Ceil();
+ const Operator* Float64RoundDown();
const Operator* Float64RoundTruncate();
const Operator* Float64RoundTiesAway();
- bool HasFloat64Floor() { return flags_ & kFloat64Floor; }
- bool HasFloat64Ceil() { return flags_ & kFloat64Ceil; }
+ bool HasFloat64RoundDown() { return flags_ & kFloat64RoundDown; }
bool HasFloat64RoundTruncate() { return flags_ & kFloat64RoundTruncate; }
bool HasFloat64RoundTiesAway() { return flags_ & kFloat64RoundTiesAway; }
V(Float64Div) \
V(Float64Mod) \
V(Float64Sqrt) \
- V(Float64Floor) \
- V(Float64Ceil) \
+ V(Float64RoundDown) \
V(Float64RoundTruncate) \
V(Float64RoundTiesAway) \
V(Float64ExtractLowWord32) \
Node* TruncateInt64ToInt32(Node* a) {
return NewNode(machine()->TruncateInt64ToInt32(), a);
}
- Node* Float64Floor(Node* a) { return NewNode(machine()->Float64Floor(), a); }
- Node* Float64Ceil(Node* a) { return NewNode(machine()->Float64Ceil(), a); }
+ Node* Float64RoundDown(Node* a) {
+ return NewNode(machine()->Float64RoundDown(), a);
+ }
Node* Float64RoundTruncate(Node* a) {
return NewNode(machine()->Float64RoundTruncate(), a);
}
case IrOpcode::kFloat64Mod:
return VisitFloat64Binop(node);
case IrOpcode::kFloat64Sqrt:
- case IrOpcode::kFloat64Floor:
- case IrOpcode::kFloat64Ceil:
+ case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
return VisitUnop(node, kMachFloat64, kMachFloat64);
case Runtime::kInlineDoubleHi:
return Bounds(Type::None(zone()), Type::Signed32());
case Runtime::kInlineConstructDouble:
+ case Runtime::kInlineMathFloor:
return Bounds(Type::None(zone()), Type::Number());
default:
break;
}
-Bounds Typer::Visitor::TypeFloat64Floor(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Bounds(Type::Number());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64Ceil(Node* node) {
+Bounds Typer::Visitor::TypeFloat64RoundDown(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
return Bounds(Type::Number());
}
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
case IrOpcode::kFloat64Sqrt:
- case IrOpcode::kFloat64Floor:
- case IrOpcode::kFloat64Ceil:
+ case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
case IrOpcode::kFloat64Equal:
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
- case kSSEFloat64Floor: {
+ case kSSEFloat64Round: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundDown);
- break;
- }
- case kSSEFloat64Ceil: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundUp);
- break;
- }
- case kSSEFloat64RoundTruncate: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundToZero);
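+      // The rounding mode is carried in the MiscField of the instruction
+      // code, where the instruction selector encoded it.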
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
case kSSECvtss2sd:
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
V(SSEFloat64Sqrt) \
- V(SSEFloat64Floor) \
- V(SSEFloat64Ceil) \
- V(SSEFloat64RoundTruncate) \
+ V(SSEFloat64Round) \
V(SSECvtss2sd) \
V(SSECvtsd2ss) \
V(SSEFloat64ToInt32) \
void InstructionSelector::VisitFloat64Sub(Node* node) {
X64OperandGenerator g(this);
+ Float64BinopMatcher m(node);
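+  // -0.0 - Float64RoundDown(-0.0 - x) rounds x toward +infinity; select
+  // kSSEFloat64Round with the kRoundUp mode for it.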
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
+ g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
if (IsSupported(AVX)) {
Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
namespace {
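+// Takes an InstructionCode rather than an ArchOpcode so that callers can OR
+// in a MiscField-encoded rounding mode.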
-void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+void VisitRRFloat64(InstructionSelector* selector, InstructionCode opcode,
Node* node) {
X64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
} // namespace
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64Floor, node);
-}
-
-
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64Ceil, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundDown), node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
+ VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundToZero),
+ node);
}
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kWord32ShiftIsSafe;
if (CpuFeatures::IsSupported(SSE4_1)) {
- flags |= MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
+ flags |= MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate;
}
return flags;
}
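+// Crankshaft support for the %_MathFloor intrinsic: lower it to the existing
+// kMathFloor unary math operation.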
+void HOptimizedGraphBuilder::GenerateMathFloor(CallRuntime* call) {
+ DCHECK(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathFloor);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
}
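+// Rounding modes for the roundsd instruction; the values match the
+// rounding-mode bits of the SSE4.1 immediate operand.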
+enum RoundingMode {
+ kRoundToNearest = 0x0,
+ kRoundDown = 0x1,
+ kRoundUp = 0x2,
+ kRoundToZero = 0x3
+};
+
+
// -----------------------------------------------------------------------------
// Machine instruction Immediates
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void ucomisd(XMMRegister dst, const Operand& src);
- enum RoundingMode {
- kRoundToNearest = 0x0,
- kRoundDown = 0x1,
- kRoundUp = 0x2,
- kRoundToZero = 0x3
- };
-
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void movmskpd(Register dst, XMMRegister src);
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
__ bind(&non_zero);
}
- __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
+ __ roundsd(xmm_scratch, input_reg, kRoundDown);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
// ECMA 262 - 15.8.2.6
function MathCeil(x) {
- return -MathFloor(-x);
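+  // -MathFloor(-x) is MathCeil(x) for all doubles; using the %_MathFloor
+  // intrinsic lets the optimizing compilers inline the rounding operation.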
+ return -%_MathFloor(-x);
}
// ECMA 262 - 15.8.2.8
// ECMA 262 - 15.8.2.9
function MathFloor(x) {
- x = TO_NUMBER_INLINE(x);
- // It's more common to call this with a positive number that's out
- // of range than negative numbers; check the upper bound first.
- if (x < 0x80000000 && x > 0) {
- // Numbers in the range [0, 2^31) can be floored by converting
- // them to an unsigned 32-bit value using the shift operator.
- // We avoid doing so for -0, because the result of Math.floor(-0)
- // has to be -0, which wouldn't be the case with the shift.
- return TO_UINT32(x);
- } else {
- return %MathFloorRT(x);
- }
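+  // The unary plus performs the ToNumber conversion previously done via
+  // TO_NUMBER_INLINE.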
+ return %_MathFloor(+x);
}
// ECMA 262 - 15.8.2.10
}
-RUNTIME_FUNCTION(Runtime_MathFloorRT) {
+RUNTIME_FUNCTION(Runtime_MathFloor) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
isolate->counters()->math_floor()->Increment();
F(MathAcos, 1, 1) \
F(MathAsin, 1, 1) \
F(MathAtan, 1, 1) \
- F(MathFloorRT, 1, 1) \
F(MathAtan2, 2, 1) \
F(MathExpRT, 1, 1) \
F(RoundNumber, 1, 1) \
F(ConstructDouble, 2, 1) \
F(DoubleHi, 1, 1) \
F(DoubleLo, 1, 1) \
+ F(MathFloor, 1, 1) \
F(MathSqrtRT, 1, 1) \
F(MathLogRT, 1, 1) \
/* ES6 Collections */ \
}
-void Assembler::roundsd(XMMRegister dst, XMMRegister src,
- Assembler::RoundingMode mode) {
+void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
}
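+// Rounding modes for the roundsd instruction (SSE4.1 immediate encoding).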
+enum RoundingMode {
+ kRoundToNearest = 0x0,
+ kRoundDown = 0x1,
+ kRoundUp = 0x2,
+ kRoundToZero = 0x3
+};
+
+
// -----------------------------------------------------------------------------
// Machine instruction Immediates
void pinsrd(XMMRegister dst, Register src, int8_t imm8);
void pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
- enum RoundingMode {
- kRoundToNearest = 0x0,
- kRoundDown = 0x1,
- kRoundUp = 0x2,
- kRoundToZero = 0x3
- };
-
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
// AVX instruction
__ subq(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
}
- __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
+ __ roundsd(xmm_scratch, input_reg, kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
-two_52 + 1 - 0.7};
-TEST(RunFloat64Floor) {
+TEST(RunFloat64RoundDown1) {
double input = -1.0;
double result = 0.0;
RawMachineAssemblerTester<int32_t> m;
- if (!m.machine()->HasFloat64Floor()) return;
+ if (!m.machine()->HasFloat64RoundDown()) return;
m.StoreToPointer(&result, kMachFloat64,
- m.Float64Floor(m.LoadFromPointer(&input, kMachFloat64)));
+ m.Float64RoundDown(m.LoadFromPointer(&input, kMachFloat64)));
m.Return(m.Int32Constant(0));
for (size_t i = 0; i < arraysize(kValues); ++i) {
input = kValues[i];
}
-TEST(RunFloat64Ceil) {
+TEST(RunFloat64RoundDown2) {
double input = -1.0;
double result = 0.0;
RawMachineAssemblerTester<int32_t> m;
- if (!m.machine()->HasFloat64Ceil()) return;
+ if (!m.machine()->HasFloat64RoundDown()) return;
m.StoreToPointer(&result, kMachFloat64,
- m.Float64Ceil(m.LoadFromPointer(&input, kMachFloat64)));
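+      // -0.0 - RoundDown(-0.0 - input) computes ceil(input) and exercises the
+      // round-up pattern matching in the instruction selectors.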
+ m.Float64Sub(m.Float64Constant(-0.0),
+ m.Float64RoundDown(m.Float64Sub(
+ m.Float64Constant(-0.0),
+ m.LoadFromPointer(&input, kMachFloat64)))));
m.Return(m.Int32Constant(0));
for (size_t i = 0; i < arraysize(kValues); ++i) {
input = kValues[i];
double input = -1.0;
double result = 0.0;
RawMachineAssemblerTester<int32_t> m;
- if (!m.machine()->HasFloat64Ceil()) return;
+ if (!m.machine()->HasFloat64RoundTruncate()) return;
m.StoreToPointer(
&result, kMachFloat64,
m.Float64RoundTruncate(m.LoadFromPointer(&input, kMachFloat64)));
Node* call =
graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Floor);
+ Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64RoundDown);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Floor(p0));
+ EXPECT_THAT(r.replacement(), IsFloat64RoundDown(p0));
}
}
}
}
-
-// -----------------------------------------------------------------------------
-// Math.ceil
-
-
-TEST_F(JSBuiltinReducerTest, MathCeilAvailable) {
- Handle<JSFunction> f = MathFunction("ceil");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Ceil);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Ceil(p0));
- }
-}
-
-
-TEST_F(JSBuiltinReducerTest, MathCeilUnavailable) {
- Handle<JSFunction> f = MathFunction("ceil");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);
-
- ASSERT_FALSE(r.Changed());
- }
-}
} // namespace compiler
} // namespace internal
} // namespace v8
PURE(Float64Mod, 2, 0, 1), PURE(Float64Sqrt, 1, 0, 1),
PURE(Float64Equal, 2, 0, 1), PURE(Float64LessThan, 2, 0, 1),
PURE(Float64LessThanOrEqual, 2, 0, 1), PURE(LoadStackPointer, 0, 0, 1),
- PURE(Float64Floor, 1, 0, 1), PURE(Float64Ceil, 1, 0, 1),
- PURE(Float64RoundTruncate, 1, 0, 1), PURE(Float64RoundTiesAway, 1, 0, 1),
- PURE(Float64ExtractLowWord32, 1, 0, 1),
+ PURE(Float64RoundDown, 1, 0, 1), PURE(Float64RoundTruncate, 1, 0, 1),
+ PURE(Float64RoundTiesAway, 1, 0, 1), PURE(Float64ExtractLowWord32, 1, 0, 1),
PURE(Float64ExtractHighWord32, 1, 0, 1),
PURE(Float64InsertLowWord32, 2, 0, 1),
PURE(Float64InsertHighWord32, 2, 0, 1)
IS_UNOP_MATCHER(TruncateFloat64ToInt32)
IS_UNOP_MATCHER(TruncateInt64ToInt32)
IS_UNOP_MATCHER(Float64Sqrt)
-IS_UNOP_MATCHER(Float64Floor)
-IS_UNOP_MATCHER(Float64Ceil)
+IS_UNOP_MATCHER(Float64RoundDown)
IS_UNOP_MATCHER(Float64RoundTruncate)
IS_UNOP_MATCHER(Float64RoundTiesAway)
IS_UNOP_MATCHER(Float64ExtractLowWord32)
Matcher<Node*> IsFloat64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsFloat64Floor(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsFloat64Ceil(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64RoundDown(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTiesAway(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64ExtractLowWord32(const Matcher<Node*>& input_matcher);
FILENAME = "src/runtime/runtime.h"
LISTHEAD = re.compile(r"#define\s+(\w+LIST\w*)\((\w+)\)")
LISTBODY = re.compile(r".*\\$")
-BLACKLIST = ['INLINE_FUNCTION_LIST']
+BLACKLIST = ['INLINE_FUNCTION_LIST', 'INLINE_OPTIMIZED_FUNCTION_LIST']
class Function(object):