Extend JSBuiltinReducer to cover Math.sqrt as well.
author mstarzinger@chromium.org <mstarzinger@chromium.org>
Wed, 24 Sep 2014 10:24:19 +0000 (10:24 +0000)
committer mstarzinger@chromium.org <mstarzinger@chromium.org>
Wed, 24 Sep 2014 10:24:19 +0000 (10:24 +0000)
R=bmeurer@chromium.org
TEST=compiler-unittests/JSBuiltinReducerTest.MathSqrt

Review URL: https://codereview.chromium.org/595963002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24177 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

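In effect, this change teaches TurboFan's JSBuiltinReducer to rewrite a call to the Math.sqrt builtin whose single argument is typed as Number into a pure Float64Sqrt machine node, which the new instruction selectors below lower to vsqrt on ARM, fsqrt on ARM64, and sqrtsd on IA32/x64. A minimal sketch of the kind of JavaScript source the reduction targets (a hypothetical illustration, not code from this commit):

    // Hypothetical example: when x and y are known to be numbers, the
    // Math.sqrt call below can be reduced to a single Float64Sqrt node
    // instead of going through a generic JSCallFunction.
    function hypot(x, y) {
      return Math.sqrt(x * x + y * y);
    }
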
23 files changed:
src/compiler/arm/code-generator-arm.cc
src/compiler/arm/instruction-codes-arm.h
src/compiler/arm/instruction-selector-arm.cc
src/compiler/arm64/code-generator-arm64.cc
src/compiler/arm64/instruction-codes-arm64.h
src/compiler/arm64/instruction-selector-arm64.cc
src/compiler/graph-unittest.cc
src/compiler/graph-unittest.h
src/compiler/ia32/code-generator-ia32.cc
src/compiler/ia32/instruction-codes-ia32.h
src/compiler/ia32/instruction-selector-ia32.cc
src/compiler/instruction-selector.cc
src/compiler/js-builtin-reducer-unittest.cc
src/compiler/js-builtin-reducer.cc
src/compiler/js-builtin-reducer.h
src/compiler/machine-operator-unittest.cc
src/compiler/machine-operator.cc
src/compiler/machine-operator.h
src/compiler/opcodes.h
src/compiler/simplified-lowering.cc
src/compiler/x64/code-generator-x64.cc
src/compiler/x64/instruction-codes-x64.h
src/compiler/x64/instruction-selector-x64.cc

index c995d11b1a96a09efcf38daec6778dafecbf668f..1ec174d7917e34dcc91e436808f21ead4949dbae 100644 (file)
@@ -323,6 +323,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArmVnegF64:
       __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
+    case kArmVsqrtF64:
+      __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
     case kArmVcvtF64S32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
index 41f0c09bacf5707be1aff7252f9e9239b5497875..7849ca91c1568a8d54711eeb170b2564f99b6ab4 100644 (file)
@@ -41,6 +41,7 @@ namespace compiler {
   V(ArmVdivF64)                    \
   V(ArmVmodF64)                    \
   V(ArmVnegF64)                    \
+  V(ArmVsqrtF64)                   \
   V(ArmVcvtF64S32)                 \
   V(ArmVcvtF64U32)                 \
   V(ArmVcvtS32F64)                 \
index a37ebf21df963b2852cfe98599dddc5950f74834..ae93b27f453f0767343202b036a4c2dd87f182e4 100644 (file)
@@ -90,6 +90,7 @@ class ArmOperandGenerator FINAL : public OperandGenerator {
       case kArmVdivF64:
       case kArmVmodF64:
       case kArmVnegF64:
+      case kArmVsqrtF64:
       case kArmVcvtF64S32:
       case kArmVcvtF64U32:
       case kArmVcvtS32F64:
@@ -768,6 +769,12 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
 }
 
 
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                     BasicBlock* deoptimization) {
   ArmOperandGenerator g(this);
index 4a9893f3b742b1d405bf581b936c159bb684679a..31c53d32748bfa8790ede39cbf09f02f1ad7a258 100644 (file)
@@ -373,6 +373,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
                        0, 2);
       break;
     }
+    case kArm64Float64Sqrt:
+      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
     case kArm64Float64ToInt32:
       __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
       break;
index 656b48083c5bd2961eb5cb6cacf60da4cfb3d7d8..0a9a2ede21a14ff2d6e0af032096027befbdcd3a 100644 (file)
@@ -62,6 +62,7 @@ namespace compiler {
   V(Arm64Float64Mul)               \
   V(Arm64Float64Div)               \
   V(Arm64Float64Mod)               \
+  V(Arm64Float64Sqrt)              \
   V(Arm64Float64ToInt32)           \
   V(Arm64Float64ToUint32)          \
   V(Arm64Int32ToFloat64)           \
index eac1ec63e9557b73dc88087b2dc538c4773c9ac1..472ce6fe78a10ddc4fe6a79795c826edcb314ff4 100644 (file)
@@ -506,6 +506,13 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
 }
 
 
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                     FlagsContinuation* cont) {
   VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
index 881c2ccacc54d1616e3294f25a8b8c3548c51947..75e70cb05d84c582a9ae782431c929a58eb3e4ad 100644 (file)
@@ -771,6 +771,7 @@ IS_UNOP_MATCHER(ChangeUint32ToFloat64)
 IS_UNOP_MATCHER(ChangeUint32ToUint64)
 IS_UNOP_MATCHER(TruncateFloat64ToInt32)
 IS_UNOP_MATCHER(TruncateInt64ToInt32)
+IS_UNOP_MATCHER(Float64Sqrt)
 #undef IS_UNOP_MATCHER
 
 }  // namespace compiler
index 39d3e15a16b8280f68dddf018ec7282ab2e0efd3..1dc9c3dae72b215edf9f65cbf9d3024fbf15f6c6 100644 (file)
@@ -131,6 +131,7 @@ Matcher<Node*> IsChangeUint32ToFloat64(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
 
 }  //  namespace compiler
 }  //  namespace internal
index 200dcb66053fcd7de01c51274ffc44108c3a136b..deab7cd9f6239f979c03db4fb1a6b92837449862 100644 (file)
@@ -285,6 +285,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ add(esp, Immediate(kDoubleSize));
       break;
     }
+    case kSSEFloat64Sqrt:
+      __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
+      break;
     case kSSEFloat64ToInt32:
       __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
       break;
index d8ea014a934d86aec3896976c12ba991e935bcae..0f4608839226832c2cdd473645adcc7449f3b838 100644 (file)
@@ -34,6 +34,7 @@ namespace compiler {
   V(SSEFloat64Mul)                 \
   V(SSEFloat64Div)                 \
   V(SSEFloat64Mod)                 \
+  V(SSEFloat64Sqrt)                \
   V(SSEFloat64ToInt32)             \
   V(SSEFloat64ToUint32)            \
   V(SSEInt32ToFloat64)             \
index ce8cb0f6306410a2c5999553418d0388b011ad80..24ebc38393cd1e2e972a4996e891ed8ec9bfa64b 100644 (file)
@@ -417,6 +417,12 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
 }
 
 
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                     FlagsContinuation* cont) {
   VisitBinop(this, node, kIA32Add, cont);
index 6bc41f47dfcabdd1f14bb9b10847eb8d8bf0bb61..3c32b642adb4c2cb937d55f8fe2c2f782146c2cb 100644 (file)
@@ -596,6 +596,8 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsDouble(node), VisitFloat64Div(node);
     case IrOpcode::kFloat64Mod:
       return MarkAsDouble(node), VisitFloat64Mod(node);
+    case IrOpcode::kFloat64Sqrt:
+      return MarkAsDouble(node), VisitFloat64Sqrt(node);
     case IrOpcode::kFloat64Equal:
       return VisitFloat64Equal(node);
     case IrOpcode::kFloat64LessThan:
index 557ce27fa3911d3931ea65281b57c7ada5160a95..51561d0732665f15e067c6d3c7b29ee742621ffc 100644 (file)
@@ -59,6 +59,26 @@ Type* const kNumberTypes[] = {
 }  // namespace
 
 
+// -----------------------------------------------------------------------------
+// Math.sqrt
+
+
+TEST_F(JSBuiltinReducerTest, MathSqrt) {
+  Handle<JSFunction> f(isolate()->context()->math_sqrt_fun());
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    Node* p0 = Parameter(t0, 0);
+    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+    Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
+                                  fun, UndefinedConstant(), p0);
+    Reduction r = Reduce(call);
+
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsFloat64Sqrt(p0));
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // Math.max
 
@@ -71,7 +91,7 @@ TEST_F(JSBuiltinReducerTest, MathMax0) {
                                 fun, UndefinedConstant());
   Reduction r = Reduce(call);
 
-  EXPECT_TRUE(r.Changed());
+  ASSERT_TRUE(r.Changed());
   EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
 }
 
@@ -86,7 +106,7 @@ TEST_F(JSBuiltinReducerTest, MathMax1) {
                                   fun, UndefinedConstant(), p0);
     Reduction r = Reduce(call);
 
-    EXPECT_TRUE(r.Changed());
+    ASSERT_TRUE(r.Changed());
     EXPECT_THAT(r.replacement(), p0);
   }
 }
@@ -107,7 +127,7 @@ TEST_F(JSBuiltinReducerTest, MathMax2) {
 
       if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
         Capture<Node*> branch;
-        EXPECT_TRUE(r.Changed());
+        ASSERT_TRUE(r.Changed());
         EXPECT_THAT(
             r.replacement(),
             IsPhi(kMachNone, p1, p0,
@@ -116,7 +136,7 @@ TEST_F(JSBuiltinReducerTest, MathMax2) {
                                           IsBranch(IsNumberLessThan(p0, p1),
                                                    graph()->start()))))));
       } else {
-        EXPECT_FALSE(r.Changed());
+        ASSERT_FALSE(r.Changed());
         EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
       }
     }
@@ -142,10 +162,10 @@ TEST_F(JSBuiltinReducerTest, MathImul) {
       Reduction r = Reduce(call);
 
       if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
-        EXPECT_TRUE(r.Changed());
+        ASSERT_TRUE(r.Changed());
         EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
       } else {
-        EXPECT_FALSE(r.Changed());
+        ASSERT_FALSE(r.Changed());
         EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
       }
     }
index 83161e14a5d5388582c23fa9a4617725a371aaa6..c57ac331defc84611a30732cefea7cb0ae348863 100644 (file)
@@ -95,6 +95,18 @@ class JSCallReduction {
 };
 
 
+// ECMA-262, section 15.8.2.17.
+Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.sqrt(a:number) -> Float64Sqrt(a)
+    Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
 // ECMA-262, section 15.8.2.11.
 Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
   JSCallReduction r(node);
@@ -145,6 +157,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
   // Dispatch according to the BuiltinFunctionId if present.
   if (!r.HasBuiltinFunctionId()) return NoChange();
   switch (r.GetBuiltinFunctionId()) {
+    case kMathSqrt:
+      return ReplaceWithPureReduction(node, ReduceMathSqrt(node));
     case kMathMax:
       return ReplaceWithPureReduction(node, ReduceMathMax(node));
     case kMathImul:
index 92c7b4ae6ee06f6482bb352b512bc0ff49351ca7..13927f6b213638b85bf719f402f15ca2fdb31d33 100644 (file)
@@ -30,6 +30,7 @@ class JSBuiltinReducer FINAL : public Reducer {
   MachineOperatorBuilder* machine() const { return jsgraph_->machine(); }
   SimplifiedOperatorBuilder* simplified() { return &simplified_; }
 
+  Reduction ReduceMathSqrt(Node* node);
   Reduction ReduceMathMax(Node* node);
   Reduction ReduceMathImul(Node* node);
 
index 6aaf06f7510d4c8ef280596deb4c79b47409ef64..cb93ce76c6115b9edb49b332c62ab0f1a3375bd0 100644 (file)
@@ -213,8 +213,8 @@ const PureOperator kPureOperators[] = {
     PURE(TruncateInt64ToInt32, 1, 1),     PURE(Float64Add, 2, 1),
     PURE(Float64Sub, 2, 1),               PURE(Float64Mul, 2, 1),
     PURE(Float64Div, 2, 1),               PURE(Float64Mod, 2, 1),
-    PURE(Float64Equal, 2, 1),             PURE(Float64LessThan, 2, 1),
-    PURE(Float64LessThanOrEqual, 2, 1)
+    PURE(Float64Sqrt, 1, 1),              PURE(Float64Equal, 2, 1),
+    PURE(Float64LessThan, 2, 1),          PURE(Float64LessThanOrEqual, 2, 1)
 #undef PURE
 };
 
index eb3e948dd8acd9e7057d513a44dfe7b4c510f9e2..2f30bd214db1ac722288cc3798f28dbb1f494b99 100644 (file)
@@ -112,6 +112,7 @@ struct StaticParameterTraits<LoadRepresentation> {
   V(Float64Mul, Operator::kCommutative, 2, 1)                                 \
   V(Float64Div, Operator::kNoProperties, 2, 1)                                \
   V(Float64Mod, Operator::kNoProperties, 2, 1)                                \
+  V(Float64Sqrt, Operator::kNoProperties, 1, 1)                               \
   V(Float64Equal, Operator::kCommutative, 2, 1)                               \
   V(Float64LessThan, Operator::kNoProperties, 2, 1)                           \
   V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 1)
index 23b7ef6453215b4b0e5d8b1ec7f684da30b51309..92c8ac420f71523c782c38e7436ab3ea47b138f0 100644 (file)
@@ -129,6 +129,7 @@ class MachineOperatorBuilder FINAL {
   const Operator* Float64Mul();
   const Operator* Float64Div();
   const Operator* Float64Mod();
+  const Operator* Float64Sqrt();
 
   // Floating point comparisons complying to IEEE 754.
   const Operator* Float64Equal();
index dabf5c57ef86261869756f18587b74a1786201bf..e210abd6b96394c4f53188b44789ae9d05667410 100644 (file)
   V(Float64Mul)               \
   V(Float64Div)               \
   V(Float64Mod)               \
+  V(Float64Sqrt)              \
   V(Float64Equal)             \
   V(Float64LessThan)          \
   V(Float64LessThanOrEqual)
index 8ac2a85dc92054f8e98d0e0ed0330ce22ef97cd0..f79452503648d6890885cf49865811120e49bb05 100644 (file)
@@ -724,6 +724,8 @@ class RepresentationSelector {
       case IrOpcode::kFloat64Div:
       case IrOpcode::kFloat64Mod:
         return VisitFloat64Binop(node);
+      case IrOpcode::kFloat64Sqrt:
+        return VisitUnop(node, kMachFloat64, kMachFloat64);
       case IrOpcode::kFloat64Equal:
       case IrOpcode::kFloat64LessThan:
       case IrOpcode::kFloat64LessThanOrEqual:
index 4d078b759608513a6cabd9b02e5d6a99215d0a68..f71d3bf1ba004ce1d2dee27f32ff3f71da6c11d2 100644 (file)
@@ -447,6 +447,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ addq(rsp, Immediate(kDoubleSize));
       break;
     }
+    case kSSEFloat64Sqrt: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kDoubleRegister) {
+        __ sqrtsd(i.OutputDoubleRegister(), input.double_reg);
+      } else {
+        __ sqrtsd(i.OutputDoubleRegister(), input.operand);
+      }
+      break;
+    }
     case kSSEFloat64ToInt32: {
       RegisterOrOperand input = i.InputRegisterOrOperand(0);
       if (input.type == kDoubleRegister) {
index c54d7eef1d6c93fdf18b1290372b11a1a57018e6..dfad2035fffd477e45c9cba9f126560e127571de 100644 (file)
@@ -50,6 +50,7 @@ namespace compiler {
   V(SSEFloat64Mul)                 \
   V(SSEFloat64Div)                 \
   V(SSEFloat64Mod)                 \
+  V(SSEFloat64Sqrt)                \
   V(SSEFloat64ToInt32)             \
   V(SSEFloat64ToUint32)            \
   V(SSEInt32ToFloat64)             \
index 96501e686b3b7eb0a73ee519c4893d7b14e61af4..5fe7bad81d93ea93fd4cded75390f505d534e40e 100644 (file)
@@ -559,6 +559,12 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
 }
 
 
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                     FlagsContinuation* cont) {
   VisitBinop(this, node, kX64Add32, cont);