case kArmVnegF64:
__ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kArmVsqrtF64:
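+ // ARM VSQRT.F64: square root of a double-precision VFP register.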
+ __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kArmVcvtF64S32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
V(ArmVdivF64) \
V(ArmVmodF64) \
V(ArmVnegF64) \
+ V(ArmVsqrtF64) \
V(ArmVcvtF64S32) \
V(ArmVcvtF64U32) \
V(ArmVcvtS32F64) \
case kArmVdivF64:
case kArmVmodF64:
case kArmVnegF64:
+ case kArmVsqrtF64:
case kArmVcvtF64S32:
case kArmVcvtF64U32:
case kArmVcvtS32F64:
}
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ ArmOperandGenerator g(this);
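+ // vsqrt operates on registers only, so force the input into one.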
+ Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
BasicBlock* deoptimization) {
ArmOperandGenerator g(this);
0, 2);
break;
}
+ case kArm64Float64Sqrt:
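+ // ARM64 FSQRT: scalar double-precision square root.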
+ __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64Float64ToInt32:
__ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
break;
V(Arm64Float64Mul) \
V(Arm64Float64Div) \
V(Arm64Float64Mod) \
+ V(Arm64Float64Sqrt) \
V(Arm64Float64ToInt32) \
V(Arm64Float64ToUint32) \
V(Arm64Int32ToFloat64) \
}
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
FlagsContinuation* cont) {
VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
IS_UNOP_MATCHER(ChangeUint32ToUint64)
IS_UNOP_MATCHER(TruncateFloat64ToInt32)
IS_UNOP_MATCHER(TruncateInt64ToInt32)
+IS_UNOP_MATCHER(Float64Sqrt)
#undef IS_UNOP_MATCHER
} // namespace compiler
Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
} // namespace compiler
} // namespace internal
__ add(esp, Immediate(kDoubleSize));
break;
}
+ case kSSEFloat64Sqrt:
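+ // SSE2 SQRTSD: square root of the low double; the source may be in memory.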
+ __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
+ break;
case kSSEFloat64ToInt32:
__ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
break;
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
+ V(SSEFloat64Sqrt) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
}
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ IA32OperandGenerator g(this);
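+ // sqrtsd accepts a memory source operand, so g.Use() is sufficient here.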
+ Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
FlagsContinuation* cont) {
VisitBinop(this, node, kIA32Add, cont);
return MarkAsDouble(node), VisitFloat64Div(node);
case IrOpcode::kFloat64Mod:
return MarkAsDouble(node), VisitFloat64Mod(node);
+ case IrOpcode::kFloat64Sqrt:
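+ // Mark the result as double before visiting, as for the other float64 ops.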
+ return MarkAsDouble(node), VisitFloat64Sqrt(node);
case IrOpcode::kFloat64Equal:
return VisitFloat64Equal(node);
case IrOpcode::kFloat64LessThan:
} // namespace
+// -----------------------------------------------------------------------------
+// Math.sqrt
+
+
+TEST_F(JSBuiltinReducerTest, MathSqrt) {
+ Handle<JSFunction> f(isolate()->context()->math_sqrt_fun());
+
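+ // The reduction should fire for every Number-typed input.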
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Sqrt(p0));
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Math.max
fun, UndefinedConstant());
Reduction r = Reduce(call);
- EXPECT_TRUE(r.Changed());
+ ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
}
fun, UndefinedConstant(), p0);
Reduction r = Reduce(call);
- EXPECT_TRUE(r.Changed());
+ ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), p0);
}
}
if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
Capture<Node*> branch;
- EXPECT_TRUE(r.Changed());
+ ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
IsPhi(kMachNone, p1, p0,
IsBranch(IsNumberLessThan(p0, p1),
graph()->start()))))));
} else {
- EXPECT_FALSE(r.Changed());
+ ASSERT_FALSE(r.Changed());
EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
}
}
Reduction r = Reduce(call);
if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
- EXPECT_TRUE(r.Changed());
+ ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
} else {
- EXPECT_FALSE(r.Changed());
+ ASSERT_FALSE(r.Changed());
EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
}
}
};
+// ECMA-262, section 15.8.2.17.
+Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
+ JSCallReduction r(node);
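+ // Only fold when the single argument is statically a Number: for such
+ // inputs the implicit ToNumber is a no-op, so no observable behavior is lost.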
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.sqrt(a:number) -> Float64Sqrt(a)
+ Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
// ECMA-262, section 15.8.2.11.
Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
JSCallReduction r(node);
// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
+ case kMathSqrt:
+ return ReplaceWithPureReduction(node, ReduceMathSqrt(node));
case kMathMax:
return ReplaceWithPureReduction(node, ReduceMathMax(node));
case kMathImul:
MachineOperatorBuilder* machine() const { return jsgraph_->machine(); }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ Reduction ReduceMathSqrt(Node* node);
Reduction ReduceMathMax(Node* node);
Reduction ReduceMathImul(Node* node);
PURE(TruncateInt64ToInt32, 1, 1), PURE(Float64Add, 2, 1),
PURE(Float64Sub, 2, 1), PURE(Float64Mul, 2, 1),
PURE(Float64Div, 2, 1), PURE(Float64Mod, 2, 1),
- PURE(Float64Equal, 2, 1), PURE(Float64LessThan, 2, 1),
- PURE(Float64LessThanOrEqual, 2, 1)
+ PURE(Float64Sqrt, 1, 1), PURE(Float64Equal, 2, 1),
+ PURE(Float64LessThan, 2, 1), PURE(Float64LessThanOrEqual, 2, 1)
#undef PURE
};
V(Float64Mul, Operator::kCommutative, 2, 1) \
V(Float64Div, Operator::kNoProperties, 2, 1) \
V(Float64Mod, Operator::kNoProperties, 2, 1) \
+ V(Float64Sqrt, Operator::kNoProperties, 1, 1) \
V(Float64Equal, Operator::kCommutative, 2, 1) \
V(Float64LessThan, Operator::kNoProperties, 2, 1) \
V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 1)
const Operator* Float64Mul();
const Operator* Float64Div();
const Operator* Float64Mod();
+ const Operator* Float64Sqrt();
// Floating point comparisons complying to IEEE 754.
const Operator* Float64Equal();
V(Float64Mul) \
V(Float64Div) \
V(Float64Mod) \
+ V(Float64Sqrt) \
V(Float64Equal) \
V(Float64LessThan) \
V(Float64LessThanOrEqual)
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
return VisitFloat64Binop(node);
+ case IrOpcode::kFloat64Sqrt:
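+ // Unary operation: one float64 input, one float64 output.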
+ return VisitUnop(node, kMachFloat64, kMachFloat64);
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
__ addq(rsp, Immediate(kDoubleSize));
break;
}
+ case kSSEFloat64Sqrt: {
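+ // Dispatch on the operand's location: sqrtsd reads a register or memory.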
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kDoubleRegister) {
+ __ sqrtsd(i.OutputDoubleRegister(), input.double_reg);
+ } else {
+ __ sqrtsd(i.OutputDoubleRegister(), input.operand);
+ }
+ break;
+ }
case kSSEFloat64ToInt32: {
RegisterOrOperand input = i.InputRegisterOrOperand(0);
if (input.type == kDoubleRegister) {
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
+ V(SSEFloat64Sqrt) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
}
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
FlagsContinuation* cont) {
VisitBinop(this, node, kX64Add32, cont);