Collect type feedback on result of Math.[round|ceil|floor]
author danno <danno@chromium.org>
Tue, 5 May 2015 07:55:51 +0000 (00:55 -0700)
committer Commit bot <commit-bot@chromium.org>
Tue, 5 May 2015 07:55:58 +0000 (07:55 +0000)
By recording the invocations of these builtins that actually return -0, we now learn not to emit Crankshaft code that handles only integer results, avoiding deopt loops.
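For context, here is a sketch (not part of the CL) of the kind of code that previously produced a deopt loop; the %-prefixed natives require --allow-natives-syntax:

  // Hypothetical repro: Crankshaft inlines Math.round assuming an integer
  // result, but Math.round(-0.25) is -0, which is not an integer.
  function f(x) { return Math.round(x); }
  f(0.25);
  f(0.25);
  %OptimizeFunctionOnNextCall(f);
  f(-0.25);  // Returns -0; the integer-only optimized code deopts.
  // Without -0 feedback, re-optimization makes the same assumption and
  // deopts again; with it, Crankshaft stops inlining the call.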

Review URL: https://codereview.chromium.org/1053143005

Cr-Commit-Position: refs/heads/master@{#28215}

36 files changed:
src/arm/code-stubs-arm.cc
src/arm/macro-assembler-arm.cc
src/arm/macro-assembler-arm.h
src/arm64/code-stubs-arm64.cc
src/arm64/macro-assembler-arm64.cc
src/arm64/macro-assembler-arm64.h
src/assembler.cc
src/assembler.h
src/ast.h
src/code-stubs.cc
src/code-stubs.h
src/hydrogen.cc
src/hydrogen.h
src/ia32/code-stubs-ia32.cc
src/ia32/macro-assembler-ia32.cc
src/ia32/macro-assembler-ia32.h
src/ic/ic.cc
src/math.js
src/mips/code-stubs-mips.cc
src/mips/macro-assembler-mips.cc
src/mips/macro-assembler-mips.h
src/mips64/code-stubs-mips64.cc
src/mips64/macro-assembler-mips64.cc
src/mips64/macro-assembler-mips64.h
src/snapshot/serialize.cc
src/type-feedback-vector.cc
src/type-feedback-vector.h
src/type-info.cc
src/type-info.h
src/typing.cc
src/x64/code-stubs-x64.cc
src/x64/macro-assembler-x64.cc
src/x64/macro-assembler-x64.h
test/mjsunit/math-ceil-minus-zero.js [new file with mode: 0644]
test/mjsunit/math-floor-minus-zero.js [new file with mode: 0644]
test/mjsunit/math-round-minus-zero.js [new file with mode: 0644]
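The contents of the three new tests are not shown in this view. A minimal sketch of the shape such a regression test likely takes (assumed, not copied from the CL) uses division to distinguish -0 from +0, since 1 / -0 is -Infinity:

  // Hypothetical shape of test/mjsunit/math-round-minus-zero.js.
  function round(x) { return Math.round(x); }
  round(0.1);  // Collect feedback.
  round(0.1);
  assertEquals(-Infinity, 1 / round(-0.25));  // Result must stay -0.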

index be55f0b062e1b5e1e90c4479494ee07ffc8e3ff8..f7f8270e5f238cec6e8cf31d7248d75ce30b0351 100644 (file)
@@ -2713,6 +2713,166 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CallIC_RoundStub::Generate(MacroAssembler* masm) {
+  Register function = r1;
+  Register vector = r2;
+  Register slot = r3;
+
+  Register temp1 = r0;
+  Register temp2 = r4;
+  DwVfpRegister double_temp1 = d1;
+  DwVfpRegister double_temp2 = d2;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss);
+
+  if (arg_count() > 0) {
+    __ ldr(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+    __ sub(temp1, temp1, Operand(kHeapObjectTag));
+    __ vldr(double_temp1, temp1, HeapNumber::kValueOffset);
+
+    // If the number is >0, it doesn't round to -0
+    __ Vmov(double_temp2, 0, temp1);
+    __ VFPCompareAndSetFlags(double_temp1, double_temp2);
+    __ b(gt, &tail);
+
+    // If the number is <-.5, it doesn't round to -0
+    __ Vmov(double_temp2, -.5, temp1);
+    __ VFPCompareAndSetFlags(double_temp1, double_temp2);
+    __ b(lt, &tail);
+
+    // +0 doesn't round to -0
+    __ VmovHigh(temp1, double_temp1);
+    __ cmp(temp1, Operand(0x80000000));
+    __ b(ne, &tail);
+
+    __ mov(temp1, Operand(slot, LSL, 1));
+    __ add(temp1, temp1, vector);
+    __ Move(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+    __ str(temp2,
+           FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+  }
+
+  __ bind(&tail);
+  // The slow case; we need this no matter what to complete a call after a miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ stop("Unreachable");
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ b(&tail);
+}
+
+
+void CallIC_FloorStub::Generate(MacroAssembler* masm) {
+  Register function = r1;
+  Register vector = r2;
+  Register slot = r3;
+
+  Register temp1 = r0;
+  Register temp2 = r4;
+  DwVfpRegister double_temp = d1;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss);
+
+  if (arg_count() > 0) {
+    __ ldr(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+    __ sub(temp1, temp1, Operand(kHeapObjectTag));
+    __ vldr(double_temp, temp1, HeapNumber::kValueOffset);
+
+    // Only -0 floors to -0.
+    __ VmovHigh(temp1, double_temp);
+    __ cmp(temp1, Operand(0x80000000));
+    __ b(ne, &tail);
+    __ VmovLow(temp1, double_temp);
+    __ cmp(temp1, Operand(0));
+    __ b(ne, &tail);
+
+    __ mov(temp1, Operand(slot, LSL, 1));
+    __ add(temp1, temp1, vector);
+    __ Move(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+    __ str(temp2,
+           FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+  }
+
+  __ bind(&tail);
+  // The slow case; we need this no matter what to complete a call after a miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ stop("Unreachable");
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ b(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+  Register function = r1;
+  Register vector = r2;
+  Register slot = r3;
+
+  Register temp1 = r0;
+  Register temp2 = r4;
+  DwVfpRegister double_temp1 = d1;
+  DwVfpRegister double_temp2 = d2;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss);
+
+  if (arg_count() > 0) {
+    __ ldr(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+    __ sub(temp1, temp1, Operand(kHeapObjectTag));
+    __ vldr(double_temp1, temp1, HeapNumber::kValueOffset);
+
+    // If the number is >0, it doesn't round to -0
+    __ Vmov(double_temp2, 0, temp1);
+    __ VFPCompareAndSetFlags(double_temp1, double_temp2);
+    __ b(gt, &tail);
+
+    // If the number is <=-1, it doesn't round to -0
+    __ Vmov(double_temp2, -1, temp1);
+    __ VFPCompareAndSetFlags(double_temp1, double_temp2);
+    __ b(le, &tail);
+
+    // +0 doesn't round to -0.
+    __ VmovHigh(temp1, double_temp1);
+    __ cmp(temp1, Operand(0x80000000));
+    __ b(ne, &tail);
+
+    __ mov(temp1, Operand(slot, LSL, 1));
+    __ add(temp1, temp1, vector);
+    __ Move(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+    __ str(temp2,
+           FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+  }
+
+  __ bind(&tail);
+  // The slow case; we need this no matter what to complete a call after a miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ stop("Unreachable");
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ b(&tail);
+}
+
+
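The three stubs above hard-code, per builtin, exactly which inputs can produce -0; anything outside those ranges branches straight to the tail call without touching the feedback slot. The ranges follow from ordinary JS semantics (nothing CL-specific):

  // Math.round(x) is -0 exactly for x in [-0.5, -0].
  Object.is(Math.round(-0.5), -0);   // true
  Object.is(Math.round(-0.51), -0);  // false, rounds to -1
  // Math.floor(x) is -0 only for x === -0.
  Object.is(Math.floor(-0), -0);     // true
  Object.is(Math.floor(-0.1), -0);   // false, floors to -1
  // Math.ceil(x) is -0 for x in (-1, -0].
  Object.is(Math.ceil(-0.99), -0);   // true
  Object.is(Math.ceil(-1), -0);      // false, stays -1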
 void CallICStub::Generate(MacroAssembler* masm) {
   // r1 - function
   // r3 - slot id (Smi)
@@ -2823,6 +2983,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ cmp(r1, r4);
   __ b(eq, &miss);
 
+  // Some builtin functions require special handling; miss to the runtime.
+  __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
+  __ cmp(r0, Operand(Smi::FromInt(0)));
+  __ b(ne, &miss);
+
   // Update stats.
   __ ldr(r4, FieldMemOperand(r2, with_types_offset));
   __ add(r4, r4, Operand(Smi::FromInt(1)));
@@ -4366,6 +4532,27 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, r2);
+  CallIC_RoundStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, r2);
+  CallIC_FloorStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, r2);
+  CallIC_CeilStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 void VectorRawLoadStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
index 12108a00404f5764727e272e69daa3d3adf10de3..2a719f540ed95c62f8cc41705f18c168ae97d2c1 100644 (file)
@@ -2589,6 +2589,15 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
 }
 
 
+void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp,
+                                        BuiltinFunctionId id, Label* miss) {
+  ldr(temp, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  ldr(temp, FieldMemOperand(temp, SharedFunctionInfo::kFunctionDataOffset));
+  cmp(temp, Operand(Smi::FromInt(id)));
+  b(ne, miss);
+}
+
+
 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                 Register scratch1, Register scratch2) {
   if (FLAG_native_code_counters && counter->Enabled()) {
index e6047adc48c23652b4f95a4fb7d18fd3362bca6d..9021c62c19c97a0090c98c5a68d5b523ea23fe06 100644 (file)
@@ -1145,6 +1145,9 @@ class MacroAssembler: public Assembler {
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
 
+  void BranchIfNotBuiltin(Register function, Register temp,
+                          BuiltinFunctionId id, Label* miss);
+
   Handle<Object> CodeObject() {
     DCHECK(!code_object_.is_null());
     return code_object_;
index 299c48b1da9942fdb61b18d880480bf0f59e86b1..d3f6158f0a0b142c7c160bbc01c87a62ef841f4e 100644 (file)
@@ -3112,6 +3112,162 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CallIC_RoundStub::Generate(MacroAssembler* masm) {
+  Register function = x1;
+  Register vector = x2;
+  Register slot = x3;
+
+  Register temp1 = x0;
+  Register temp2 = x4;
+  DoubleRegister double_temp1 = d1;
+  DoubleRegister double_temp2 = d2;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss);
+
+  if (arg_count() > 0) {
+    __ Ldr(temp1, MemOperand(jssp, (arg_count() - 1) * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+    __ Sub(temp1, temp1, Operand(kHeapObjectTag));
+    __ Ldr(double_temp1, MemOperand(temp1, HeapNumber::kValueOffset));
+
+    // If the number is >0, it doesn't round to -0
+    __ Fmov(double_temp2, 0);
+    __ Fcmp(double_temp1, double_temp2);
+    __ B(gt, &tail);
+
+    // If the number is <-.5, it doesn't round to -0
+    __ Fmov(double_temp2, -.5);
+    __ Fcmp(double_temp1, double_temp2);
+    __ B(lt, &tail);
+
+    __ Fmov(temp1, double_temp1);
+    __ Cmp(temp1, Operand(0x8000000000000000));
+    __ B(ne, &tail);
+
+    __ SmiUntag(temp1, slot);
+    __ Mov(temp1, Operand(temp1, LSL, kPointerSizeLog2));
+    __ Add(temp1, temp1, vector);
+    __ Mov(temp2, Smi::FromInt(kHasReturnedMinusZeroSentinel));
+    __ Str(temp2,
+           FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+  }
+
+  __ bind(&tail);
+  // The slow case; we need this no matter what to complete a call after a miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  __ Unreachable();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ b(&tail);
+}
+
+
+void CallIC_FloorStub::Generate(MacroAssembler* masm) {
+  Register function = x1;
+  Register vector = x2;
+  Register slot = x3;
+
+  Register temp1 = x0;
+  Register temp2 = x4;
+  DoubleRegister double_temp = d1;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss);
+
+  if (arg_count() > 0) {
+    __ Ldr(temp1, MemOperand(jssp, (arg_count() - 1) * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+    __ Sub(temp1, temp1, Operand(kHeapObjectTag));
+    __ Ldr(double_temp, MemOperand(temp1, HeapNumber::kValueOffset));
+
+    // Only -0 floors to -0.
+    __ Fmov(temp1, double_temp);
+    __ Cmp(temp1, Operand(0x8000000000000000));
+    __ B(ne, &tail);
+
+    __ SmiUntag(temp1, slot);
+    __ Mov(temp1, Operand(temp1, LSL, kPointerSizeLog2));
+    __ Add(temp1, temp1, vector);
+    __ Mov(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+    __ Str(temp2,
+           FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+  }
+
+  __ bind(&tail);
+  // The slow case; we need this no matter what to complete a call after a miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  __ Unreachable();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ b(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+  Register function = x1;
+  Register vector = x2;
+  Register slot = x3;
+
+  Register temp1 = x0;
+  Register temp2 = x4;
+  DoubleRegister double_temp1 = d1;
+  DoubleRegister double_temp2 = d2;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss);
+
+  if (arg_count() > 0) {
+    __ Ldr(temp1, MemOperand(jssp, (arg_count() - 1) * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+    __ Sub(temp1, temp1, Operand(kHeapObjectTag));
+    __ Ldr(double_temp1, MemOperand(temp1, HeapNumber::kValueOffset));
+
+    // If the number is positive, it doesn't ceil to -0
+    __ Fmov(double_temp2, 0);
+    __ Fcmp(double_temp1, double_temp2);
+    __ B(gt, &tail);
+
+    // If the number is <= -1, it doesn't ceil to -0
+    __ Fmov(double_temp2, -1);
+    __ Fcmp(double_temp1, double_temp2);
+    __ B(le, &tail);
+
+    // +0 doesn't ceil to -0
+    __ Fmov(temp1, double_temp1);
+    __ Cmp(temp1, Operand(0x8000000000000000));
+    __ B(ne, &tail);
+
+    __ SmiUntag(temp1, slot);
+    __ Mov(temp1, Operand(temp1, LSL, kPointerSizeLog2));
+    __ Add(temp1, temp1, vector);
+    __ Mov(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+    __ Str(temp2,
+           FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+  }
+
+  __ bind(&tail);
+  // The slow case; we need this no matter what to complete a call after a miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  __ Unreachable();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ b(&tail);
+}
+
+
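On arm64 the final -0 check is a single 64-bit compare: Fmov copies the raw double bits into a core register, and 0x8000000000000000 is the IEEE-754 bit pattern of -0 (sign bit set, everything else clear). The same bit pattern can be observed from JS via typed arrays (a sketch, not CL code):

  // Raw bits of -0: high word 0x80000000, low word 0 (little-endian view).
  var f64 = new Float64Array([-0]);
  var u32 = new Uint32Array(f64.buffer);
  u32[1].toString(16);  // "80000000"
  u32[0];               // 0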
 void CallICStub::Generate(MacroAssembler* masm) {
   ASM_LOCATION("CallICStub");
 
@@ -3230,6 +3386,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ Cmp(function, x5);
   __ B(eq, &miss);
 
+  // Some builtin functions require special handling; miss to the runtime.
+  __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x0, FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+  __ Cmp(x0, Operand(Smi::FromInt(0)));
+  __ B(ne, &miss);
+
   // Update stats.
   __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
   __ Adds(x4, x4, Operand(Smi::FromInt(1)));
@@ -4499,6 +4661,27 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, x2);
+  CallIC_RoundStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, x2);
+  CallIC_FloorStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, x2);
+  CallIC_CeilStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 void VectorRawLoadStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
index 07e237e0b498906361f9103c19990dee78ff4dc7..52cc3fdd4ebd379a3dda8080e155e216841286f0 100644 (file)
@@ -1689,6 +1689,15 @@ void MacroAssembler::GetBuiltinEntry(Register target,
 }
 
 
+void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp,
+                                        BuiltinFunctionId id, Label* miss) {
+  Ldr(temp, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  Ldr(temp, FieldMemOperand(temp, SharedFunctionInfo::kFunctionDataOffset));
+  Cmp(temp, Operand(Smi::FromInt(id)));
+  B(ne, miss);
+}
+
+
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
index 1160c40bf609ebf48d65cdf6e4c3c46c442a0d89..ccb1ec9575e5be761d025b981b237ae7cf95e4b2 100644 (file)
@@ -1143,6 +1143,9 @@ class MacroAssembler : public Assembler {
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
 
+  void BranchIfNotBuiltin(Register function, Register temp,
+                          BuiltinFunctionId id, Label* miss);
+
   void Jump(Register target);
   void Jump(Address target, RelocInfo::Mode rmode);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode);
index 1464074b894640d8d19e9104b96849e9526581f8..038363c7c0ff8c996483948c03521fc370964acb 100644 (file)
@@ -109,6 +109,7 @@ namespace internal {
 struct DoubleConstant BASE_EMBEDDED {
   double min_int;
   double one_half;
+  double minus_one;
   double minus_one_half;
   double negative_infinity;
   double the_hole_nan;
@@ -950,6 +951,7 @@ void RelocInfo::Verify(Isolate* isolate) {
 void ExternalReference::SetUp() {
   double_constants.min_int = kMinInt;
   double_constants.one_half = 0.5;
+  double_constants.minus_one = -1;
   double_constants.minus_one_half = -0.5;
   double_constants.the_hole_nan = bit_cast<double>(kHoleNanInt64);
   double_constants.negative_infinity = -V8_INFINITY;
@@ -1274,6 +1276,12 @@ ExternalReference ExternalReference::address_of_minus_one_half() {
 }
 
 
+ExternalReference ExternalReference::address_of_minus_one() {
+  return ExternalReference(
+      reinterpret_cast<void*>(&double_constants.minus_one));
+}
+
+
 ExternalReference ExternalReference::address_of_negative_infinity() {
   return ExternalReference(
       reinterpret_cast<void*>(&double_constants.negative_infinity));
index fd66e0bfdb111b22c74f562cae1640c191481c2f..9fe4107163d6a641fb47e6b8cde5428c9c8ae32a 100644 (file)
@@ -957,6 +957,7 @@ class ExternalReference BASE_EMBEDDED {
   // Static variables containing common double constants.
   static ExternalReference address_of_min_int();
   static ExternalReference address_of_one_half();
+  static ExternalReference address_of_minus_one();
   static ExternalReference address_of_minus_one_half();
   static ExternalReference address_of_negative_infinity();
   static ExternalReference address_of_the_hole_nan();
index a686f91307e5190b6d8e66414ecc6a17a91a17a2..0cb506b47f944cb83307c1bb10595bf8f5f88b98 100644 (file)
--- a/src/ast.h
+++ b/src/ast.h
@@ -1818,6 +1818,13 @@ class Call final : public Expression {
     bit_field_ = IsUninitializedField::update(bit_field_, b);
   }
 
+  void MarkShouldHandleMinusZeroResult() {
+    bit_field_ = ShouldHandleMinusZeroResultField::update(bit_field_, true);
+  }
+  bool ShouldHandleMinusZeroResult() {
+    return ShouldHandleMinusZeroResultField::decode(bit_field_);
+  }
+
   enum CallType {
     POSSIBLY_EVAL_CALL,
     GLOBAL_CALL,
@@ -1844,7 +1851,8 @@ class Call final : public Expression {
         ic_slot_or_slot_(FeedbackVectorICSlot::Invalid().ToInt()),
         expression_(expression),
         arguments_(arguments),
-        bit_field_(IsUninitializedField::encode(false)) {
+        bit_field_(IsUninitializedField::encode(false) |
+                   ShouldHandleMinusZeroResultField::encode(false)) {
     if (expression->IsProperty()) {
       expression->AsProperty()->mark_for_call();
     }
@@ -1862,6 +1870,7 @@ class Call final : public Expression {
   Handle<JSFunction> target_;
   Handle<AllocationSite> allocation_site_;
   class IsUninitializedField : public BitField8<bool, 0, 1> {};
+  class ShouldHandleMinusZeroResultField : public BitField8<bool, 1, 1> {};
   uint8_t bit_field_;
 };
 
index 46d8342c50a8b2545a231740f920e27ef445753c..045c0e785f57c11ac9b99df315432359e6b6027c 100644 (file)
@@ -538,6 +538,21 @@ Type* CompareNilICStub::GetInputType(Zone* zone, Handle<Map> map) {
 }
 
 
+void CallIC_RoundStub::PrintState(std::ostream& os) const {  // NOLINT
+  os << state() << " (Round)";
+}
+
+
+void CallIC_FloorStub::PrintState(std::ostream& os) const {  // NOLINT
+  os << state() << " (Floor)";
+}
+
+
+void CallIC_CeilStub::PrintState(std::ostream& os) const {  // NOLINT
+  os << state() << " (Ceil)";
+}
+
+
 void CallIC_ArrayStub::PrintState(std::ostream& os) const {  // NOLINT
   os << state() << " (Array)";
 }
index c6767e9b8b73f60c318e6a18952b0367aae8026e..aa9ba41b9b30baea46bcc72753ca262ed39972e7 100644 (file)
@@ -30,6 +30,9 @@ namespace internal {
   V(CallFunction)                           \
   V(CallIC)                                 \
   V(CallIC_Array)                           \
+  V(CallIC_Round)                           \
+  V(CallIC_Floor)                           \
+  V(CallIC_Ceil)                            \
   V(CEntry)                                 \
   V(CompareIC)                              \
   V(DoubleToI)                              \
@@ -41,6 +44,9 @@ namespace internal {
   V(LoadICTrampoline)                       \
   V(CallICTrampoline)                       \
   V(CallIC_ArrayTrampoline)                 \
+  V(CallIC_RoundTrampoline)                 \
+  V(CallIC_FloorTrampoline)                 \
+  V(CallIC_CeilTrampoline)                  \
   V(LoadIndexedInterceptor)                 \
   V(LoadIndexedString)                      \
   V(MathPow)                                \
@@ -870,6 +876,8 @@ class CallICStub: public PlatformCodeStub {
     return static_cast<ExtraICState>(minor_key_);
   }
 
+  static const int kHasReturnedMinusZeroSentinel = 1;
+
  protected:
   bool CallAsMethod() const {
     return state().call_type() == CallICState::METHOD;
@@ -892,6 +900,48 @@ class CallICStub: public PlatformCodeStub {
 };
 
 
+class CallIC_RoundStub : public CallICStub {
+ public:
+  CallIC_RoundStub(Isolate* isolate, const CallICState& state_in)
+      : CallICStub(isolate, state_in) {}
+
+  InlineCacheState GetICState() const final { return MONOMORPHIC; }
+
+ private:
+  void PrintState(std::ostream& os) const override;  // NOLINT
+
+  DEFINE_PLATFORM_CODE_STUB(CallIC_Round, CallICStub);
+};
+
+
+class CallIC_FloorStub : public CallICStub {
+ public:
+  CallIC_FloorStub(Isolate* isolate, const CallICState& state_in)
+      : CallICStub(isolate, state_in) {}
+
+  InlineCacheState GetICState() const final { return MONOMORPHIC; }
+
+ private:
+  void PrintState(std::ostream& os) const override;  // NOLINT
+
+  DEFINE_PLATFORM_CODE_STUB(CallIC_Floor, CallICStub);
+};
+
+
+class CallIC_CeilStub : public CallICStub {
+ public:
+  CallIC_CeilStub(Isolate* isolate, const CallICState& state_in)
+      : CallICStub(isolate, state_in) {}
+
+  InlineCacheState GetICState() const final { return MONOMORPHIC; }
+
+ private:
+  void PrintState(std::ostream& os) const override;  // NOLINT
+
+  DEFINE_PLATFORM_CODE_STUB(CallIC_Ceil, CallICStub);
+};
+
+
 class CallIC_ArrayStub: public CallICStub {
  public:
   CallIC_ArrayStub(Isolate* isolate, const CallICState& state_in)
@@ -2104,6 +2154,36 @@ class CallIC_ArrayTrampolineStub : public CallICTrampolineStub {
 };
 
 
+class CallIC_RoundTrampolineStub : public CallICTrampolineStub {
+ public:
+  CallIC_RoundTrampolineStub(Isolate* isolate, const CallICState& state)
+      : CallICTrampolineStub(isolate, state) {}
+
+ private:
+  DEFINE_PLATFORM_CODE_STUB(CallIC_RoundTrampoline, CallICTrampolineStub);
+};
+
+
+class CallIC_FloorTrampolineStub : public CallICTrampolineStub {
+ public:
+  CallIC_FloorTrampolineStub(Isolate* isolate, const CallICState& state)
+      : CallICTrampolineStub(isolate, state) {}
+
+ private:
+  DEFINE_PLATFORM_CODE_STUB(CallIC_FloorTrampoline, CallICTrampolineStub);
+};
+
+
+class CallIC_CeilTrampolineStub : public CallICTrampolineStub {
+ public:
+  CallIC_CeilTrampolineStub(Isolate* isolate, const CallICState& state)
+      : CallICTrampolineStub(isolate, state) {}
+
+ private:
+  DEFINE_PLATFORM_CODE_STUB(CallIC_CeilTrampoline, CallICTrampolineStub);
+};
+
+
 class MegamorphicLoadStub : public HydrogenCodeStub {
  public:
   MegamorphicLoadStub(Isolate* isolate, const LoadICState& state)
index 9bdc070300512b44f0c6ea5ba292b8530655ef9f..15c83c6cff4a1bba44cde057ad3c36d91f3ab970 100644 (file)
@@ -8341,8 +8341,13 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
       if (!FLAG_fast_math) break;
       // Fall through if FLAG_fast_math.
     case kMathRound:
-    case kMathFround:
     case kMathFloor:
+      // If round/floor has seen minus zero, don't inline, since inlining
+      // assumes the returned value is an integer, which -0 definitely is not.
+      if (expr->ShouldHandleMinusZeroResult()) {
+        break;
+      }
+    case kMathFround:
     case kMathAbs:
     case kMathSqrt:
     case kMathLog:
@@ -8355,6 +8360,28 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
         return true;
       }
       break;
+    case kMathCeil:
+      // If ceil has seen minus zero, don't inline, since inlining assumes
+      // the returned value is an integer, which -0 definitely is not.
+      if (expr->ShouldHandleMinusZeroResult()) {
+        break;
+      }
+      if (expr->arguments()->length() == 1) {
+        HValue* argument = Pop();
+        Drop(2);  // Receiver and function.
+        HValue* op = NULL;
+        {
+          NoObservableSideEffectsScope s(this);
+          HValue* neg_arg =
+              AddUncasted<HMul>(graph()->GetConstantMinus1(), argument);
+          op = AddUncasted<HUnaryMathOperation>(neg_arg, kMathFloor);
+        }
+        HInstruction* neg_op =
+            NewUncasted<HMul>(graph()->GetConstantMinus1(), op);
+        ast_context()->ReturnInstruction(neg_op, expr->id());
+        return true;
+      }
+      break;
     case kMathImul:
       if (expr->arguments()->length() == 2) {
         HValue* right = Pop();
@@ -8448,8 +8475,13 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
       if (!FLAG_fast_math) break;
       // Fall through if FLAG_fast_math.
     case kMathRound:
-    case kMathFround:
     case kMathFloor:
+      // If round/floor has seen minus zero, don't inline, since inlining
+      // assumes the returned value is an integer, which -0 definitely is not.
+      if (expr->ShouldHandleMinusZeroResult()) {
+        break;
+      }
+    case kMathFround:
     case kMathAbs:
     case kMathSqrt:
     case kMathLog:
@@ -8462,6 +8494,28 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
         return true;
       }
       break;
+    case kMathCeil:
+      // If ceil has seen minus zero, don't inline, since inlining assumes
+      // the returned value is an integer, which -0 definitely is not.
+      if (expr->ShouldHandleMinusZeroResult()) {
+        break;
+      }
+      if (argument_count == 2) {
+        HValue* argument = Pop();
+        Drop(2);  // Receiver and function.
+        HValue* op = NULL;
+        {
+          NoObservableSideEffectsScope s(this);
+          HValue* neg_arg =
+              AddUncasted<HMul>(graph()->GetConstantMinus1(), argument);
+          op = AddUncasted<HUnaryMathOperation>(neg_arg, kMathFloor);
+        }
+        HInstruction* neg_op =
+            NewUncasted<HMul>(graph()->GetConstantMinus1(), op);
+        ast_context()->ReturnInstruction(neg_op, expr->id());
+        return true;
+      }
+      break;
     case kMathPow:
       if (argument_count == 3) {
         HValue* right = Pop();
@@ -12209,15 +12263,6 @@ void HOptimizedGraphBuilder::GenerateMathClz32(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateMathFloor(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* value = Pop();
-  HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathFloor);
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
 void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
   DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
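The new kMathCeil inlining above lowers ceil to a negated floor via the identity ceil(x) == -floor(-x), keeping the intermediate negation and floor inside a NoObservableSideEffectsScope and returning the final negation as the result instruction. The identity is easy to spot-check in JS, including the -0 case (ordinary math, not CL code):

  function ceilViaFloor(x) { return -Math.floor(-x); }
  ceilViaFloor(1.2) === Math.ceil(1.2);    // true, both 2
  ceilViaFloor(-1.2) === Math.ceil(-1.2);  // true, both -1
  Object.is(ceilViaFloor(-0.5), -0);       // true: -floor(0.5) is -0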
index 3eb34a2d79200e6124a54ce3d31486d92fc9413e..2b96032415ccbb7a1aaf99f073229321b3c43d73 100644 (file)
@@ -2225,7 +2225,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
   F(DoubleHi)                          \
   F(DoubleLo)                          \
   F(MathClz32)                         \
-  F(MathFloor)                         \
   F(MathSqrt)                          \
   F(MathLogRT)                         \
   /* ES6 Collections */                \
index 727fae5b800d393eda79b1143d8285a23e22c1c3..5687435cc2b99af971505caf85f6e9cf1f33e68a 100644 (file)
@@ -2265,6 +2265,157 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CallIC_RoundStub::Generate(MacroAssembler* masm) {
+  Register function = edi;
+  Register vector = ebx;
+  Register slot = edx;
+
+  Register temp = eax;
+  XMMRegister xmm_temp1 = xmm0;
+  XMMRegister xmm_temp2 = xmm1;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp, kMathRound, &miss);
+
+  if (arg_count() > 0) {
+    ExternalReference minus_one_half =
+        ExternalReference::address_of_minus_one_half();
+
+    __ mov(temp, Operand(esp, arg_count() * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp, map, &tail, DO_SMI_CHECK);
+
+    __ movsd(xmm_temp1, FieldOperand(temp, HeapNumber::kValueOffset));
+
+    // If the number is >0, it doesn't round to -0
+    __ xorps(xmm_temp2, xmm_temp2);
+    __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(above, &tail, Label::kNear);
+
+    // If the number is <-.5, it doesn't round to -0
+    __ movsd(xmm_temp2, Operand::StaticVariable(minus_one_half));
+    __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(below, &tail, Label::kNear);
+
+    // The only non-negative value remaining is +0, which doesn't round to -0.
+    __ movmskpd(temp, xmm_temp1);
+    __ test(temp, Immediate(1));
+    __ j(zero, &tail, Label::kNear);
+
+    __ mov(FieldOperand(vector, slot, times_half_pointer_size,
+                        FixedArray::kHeaderSize + kPointerSize),
+           Immediate(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+  }
+
+  __ bind(&tail);
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ int3();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ jmp(&tail);
+}
+
+
+void CallIC_FloorStub::Generate(MacroAssembler* masm) {
+  Register function = edi;
+  Register vector = ebx;
+  Register slot = edx;
+
+  Register temp = eax;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp, kMathFloor, &miss);
+
+  if (arg_count() > 0) {
+    __ mov(temp, Operand(esp, arg_count() * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp, map, &tail, DO_SMI_CHECK);
+
+    // The only number that floors to -0 is -0.
+    __ cmp(FieldOperand(temp, HeapNumber::kExponentOffset),
+           Immediate(0x80000000));
+    __ j(not_equal, &tail);
+
+    __ cmp(FieldOperand(temp, HeapNumber::kMantissaOffset), Immediate(0));
+    __ j(not_equal, &tail);
+
+    __ mov(FieldOperand(vector, slot, times_half_pointer_size,
+                        FixedArray::kHeaderSize + kPointerSize),
+           Immediate(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+  }
+
+  __ bind(&tail);
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ int3();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ jmp(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+  Register function = edi;
+  Register vector = ebx;
+  Register slot = edx;
+
+  Register temp = eax;
+  XMMRegister xmm_temp1 = xmm0;
+  XMMRegister xmm_temp2 = xmm1;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp, kMathCeil, &miss);
+
+  if (arg_count() > 0) {
+    ExternalReference minus_one = ExternalReference::address_of_minus_one();
+
+    __ mov(temp, Operand(esp, arg_count() * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp, map, &tail, DO_SMI_CHECK);
+
+    __ movsd(xmm_temp1, FieldOperand(temp, HeapNumber::kValueOffset));
+
+    // If the number is >0, it doesn't ceil to -0
+    __ xorps(xmm_temp2, xmm_temp2);
+    __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(above, &tail, Label::kNear);
+
+    // If the number is <= -1, it doesn't ceil to -0
+    __ movsd(xmm_temp2, Operand::StaticVariable(minus_one));
+    __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(below_equal, &tail, Label::kNear);
+
+    // The only non-negative value remaining is +0, which doesn't ceil to -0.
+    __ movmskpd(temp, xmm_temp1);
+    __ test(temp, Immediate(1));
+    __ j(zero, &tail, Label::kNear);
+
+    __ mov(FieldOperand(vector, slot, times_half_pointer_size,
+                        FixedArray::kHeaderSize + kPointerSize),
+           Immediate(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+  }
+
+  __ bind(&tail);
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ int3();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ jmp(&tail);
+}
+
+
 void CallICStub::Generate(MacroAssembler* masm) {
   // edi - function
   // edx - slot id
@@ -2373,6 +2524,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ cmp(edi, ecx);
   __ j(equal, &miss);
 
+  // Make sure that the function is not Math.floor, Math.round or Math.ceil,
+  // which have special CallICs to handle -0.0.
+  __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+  __ cmp(eax, Immediate(Smi::FromInt(0)));
+  __ j(not_equal, &miss);
+
   // Update stats.
   __ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
 
@@ -4649,6 +4807,27 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, ebx);
+  CallIC_RoundStub stub(isolate(), state());
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, ebx);
+  CallIC_FloorStub stub(isolate(), state());
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, ebx);
+  CallIC_CeilStub stub(isolate(), state());
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
index 4d599eef8d4186fbb1dfb7a780445ad8f8008a1b..2aab2e68782bd29b8cc8611824f4a5ea7e10a2e6 100644 (file)
@@ -2141,6 +2141,15 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
 }
 
 
+void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp,
+                                        BuiltinFunctionId id, Label* miss) {
+  mov(temp, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  mov(temp, FieldOperand(temp, SharedFunctionInfo::kFunctionDataOffset));
+  cmp(temp, Immediate(Smi::FromInt(id)));
+  j(not_equal, miss);
+}
+
+
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
index 69b0c5f5a8d1fb53d6eca7a255d07ca415d5f3e8..2aeb36ee9af73e17a3edce2f77d57fc2056991df 100644 (file)
@@ -353,6 +353,9 @@ class MacroAssembler: public Assembler {
   // Store the code object for the given builtin in the target register.
   void GetBuiltinEntry(Register target, Builtins::JavaScript id);
 
+  void BranchIfNotBuiltin(Register function, Register temp,
+                          BuiltinFunctionId id, Label* miss);
+
   // Expression support
   // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
   // hinders register renaming and makes dependence chains longer. So we use
index 69fe5f8b2ee527735f27ee5a7be96118cbf266c9..e35fea549ca80d2e5ccfdbb2e028a5551edffdc7 100644 (file)
@@ -2223,9 +2223,9 @@ bool CallIC::DoCustomHandler(Handle<Object> function,
   // Are we the array function?
   Handle<JSFunction> array_function =
       Handle<JSFunction>(isolate()->native_context()->array_function());
+  CallICNexus* nexus = casted_nexus<CallICNexus>();
   if (array_function.is_identical_to(Handle<JSFunction>::cast(function))) {
     // Alter the slot.
-    CallICNexus* nexus = casted_nexus<CallICNexus>();
     nexus->ConfigureMonomorphicArray();
 
     // Vector-based ICs have a different calling convention in optimized code
@@ -2247,6 +2247,48 @@ bool CallIC::DoCustomHandler(Handle<Object> function,
     OnTypeFeedbackChanged(isolate(), get_host(), nexus->vector(), state(),
                           MONOMORPHIC);
     return true;
+  } else {
+    Handle<JSFunction> maybe_builtin(Handle<JSFunction>::cast(function));
+    if (maybe_builtin->shared()->HasBuiltinFunctionId()) {
+      BuiltinFunctionId id = maybe_builtin->shared()->builtin_function_id();
+      switch (id) {
+        case kMathRound: {
+          nexus->ConfigureMonomorphicMathFunction(maybe_builtin);
+          if (AddressIsOptimizedCode()) {
+            CallIC_RoundStub stub(isolate(), callic_state);
+            set_target(*stub.GetCode());
+          } else {
+            CallIC_RoundTrampolineStub stub(isolate(), callic_state);
+            set_target(*stub.GetCode());
+          }
+          return true;
+        }
+        case kMathFloor:
+          nexus->ConfigureMonomorphicMathFunction(maybe_builtin);
+          if (AddressIsOptimizedCode()) {
+            CallIC_FloorStub stub(isolate(), callic_state);
+            set_target(*stub.GetCode());
+          } else {
+            CallIC_FloorTrampolineStub stub(isolate(), callic_state);
+            set_target(*stub.GetCode());
+          }
+          return true;
+        case kMathCeil:
+          nexus->ConfigureMonomorphicMathFunction(maybe_builtin);
+          if (AddressIsOptimizedCode()) {
+            CallIC_CeilStub stub(isolate(), callic_state);
+            set_target(*stub.GetCode());
+          } else {
+            CallIC_CeilTrampolineStub stub(isolate(), callic_state);
+            set_target(*stub.GetCode());
+          }
+          return true;
+        default:
+          break;
+      }
+    }
   }
   return false;
 }
index 1a5a1056f83c9f88d5a3e2c0fbca920de76ff990..13bcf0f655a114a0b29423d4149390ecce9ac0de 100644 (file)
@@ -52,7 +52,7 @@ function MathAtan2JS(y, x) {
 
 // ECMA 262 - 15.8.2.6
 function MathCeil(x) {
-  return -%_MathFloor(-x);
+  return -MathFloorJS(-x);
 }
 
 // ECMA 262 - 15.8.2.8
@@ -62,7 +62,7 @@ function MathExp(x) {
 
 // ECMA 262 - 15.8.2.9
 function MathFloorJS(x) {
-  return %_MathFloor(+x);
+  return %MathFloor(+x);
 }
 
 // ECMA 262 - 15.8.2.10
@@ -167,8 +167,8 @@ function MathSign(x) {
 // ES6 draft 09-27-13, section 20.2.2.34.
 function MathTrunc(x) {
   x = +x;
-  if (x > 0) return %_MathFloor(x);
-  if (x < 0) return -%_MathFloor(-x);
+  if (x > 0) return MathFloorJS(x);
+  if (x < 0) return -MathFloorJS(-x);
   // -0, 0 or NaN.
   return x;
 }
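Presumably because the CL removes Crankshaft's inline %_MathFloor intrinsic (GenerateMathFloor is deleted from hydrogen.cc above), math.js can no longer use it: MathFloorJS now calls the %MathFloor runtime function, and MathCeil and MathTrunc are expressed in terms of MathFloorJS. The -0 behavior is unchanged, e.g.:

  Object.is(Math.ceil(-0.5), -0);   // true: -MathFloorJS(0.5) is -0
  Object.is(Math.trunc(-0.5), -0);  // true: same path for negative inputs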
index 84f64e098e4e6d55c4ece31d6dd28839224b177a..711e60e3c7dc8ff9a90c0f05e54093e7f9a87c29 100644 (file)
@@ -2853,6 +2853,155 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CallIC_RoundStub::Generate(MacroAssembler* masm) {
+  Register function = a1;
+  Register vector = a2;
+  Register slot = a3;
+
+  Register temp1 = a0;
+  Register temp2 = t0;
+  DoubleRegister double_temp1 = f12;
+  DoubleRegister double_temp2 = f14;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss);
+
+  if (arg_count() > 0) {
+    __ lw(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+    __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset));
+
+    // If the number is >0, it doesn't round to -0
+    __ Move(double_temp2, 0.0);
+    __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2);
+
+    // If the number is <-.5, it doesn't round to -0
+    __ Move(double_temp2, -.5);
+    __ BranchF64(&tail, nullptr, lt, double_temp1, double_temp2);
+
+    // +0 doesn't round to -0
+    __ FmoveHigh(temp1, double_temp1);
+    __ Branch(&tail, ne, temp1, Operand(0x80000000));
+
+    __ sll(temp1, slot, 1);
+    __ Addu(temp1, temp1, vector);
+    __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+    __ sw(temp2,
+          FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+  }
+
+  __ bind(&tail);
+  // The slow case; we need this no matter what to complete a call after a miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ stop("Unreachable");
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ Branch(&tail);
+}
+
+
+void CallIC_FloorStub::Generate(MacroAssembler* masm) {
+  Register function = a1;
+  Register vector = a2;
+  Register slot = a3;
+
+  Register temp1 = a0;
+  Register temp2 = t0;
+  DoubleRegister double_temp = f12;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss);
+
+  if (arg_count() > 0) {
+    __ lw(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+    __ ldc1(double_temp, FieldMemOperand(temp1, HeapNumber::kValueOffset));
+
+    // Only -0 floors to -0.
+    __ FmoveHigh(temp1, double_temp);
+    __ Branch(&tail, ne, temp1, Operand(0x80000000));
+    __ FmoveLow(temp1, double_temp);
+    __ Branch(&tail, ne, temp1, Operand(zero_reg));
+
+    __ sll(temp1, slot, 1);
+    __ Addu(temp1, temp1, vector);
+    __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+    __ sw(temp2,
+          FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+  }
+
+  __ bind(&tail);
+  // The slow case; we need this no matter what to complete a call after a miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ stop("Unreachable");
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ Branch(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+  Register function = a1;
+  Register vector = a2;
+  Register slot = a3;
+
+  Register temp1 = a0;
+  Register temp2 = t0;
+  DoubleRegister double_temp1 = f12;
+  DoubleRegister double_temp2 = f14;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss);
+
+  if (arg_count() > 0) {
+    __ lw(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+    __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset));
+
+    // If the number is >0, it doesn't round to -0
+    __ Move(double_temp2, 0.0);
+    __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2);
+
+    // If the number is <=-1, it doesn't round to -0
+    __ Move(double_temp2, -1.0);
+    __ BranchF64(&tail, nullptr, le, double_temp1, double_temp2);
+
+    // +0 doesn't round to -0.
+    __ FmoveHigh(temp1, double_temp1);
+    __ Branch(&tail, ne, temp1, Operand(0x80000000));
+
+    __ sll(temp1, slot, 1);
+    __ Addu(temp1, temp1, vector);
+    __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+    __ sw(temp2,
+          FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+  }
+
+  __ bind(&tail);
+  // The slow case; we need this no matter what to complete a call after a miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ stop("Unreachable");
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ Branch(&tail);
+}
+
+
 void CallICStub::Generate(MacroAssembler* masm) {
   // a1 - function
   // a3 - slot id (Smi)
@@ -2963,6 +3112,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
   __ Branch(&miss, eq, a1, Operand(t0));
 
+  // Some builtin functions require special handling; miss to the runtime.
+  __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+  __ Branch(&miss, ne, t0, Operand(Smi::FromInt(0)));
+
   // Update stats.
   __ lw(t0, FieldMemOperand(a2, with_types_offset));
   __ Addu(t0, t0, Operand(Smi::FromInt(1)));
@@ -4591,6 +4745,27 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, a2);
+  CallIC_RoundStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, a2);
+  CallIC_FloorStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, a2);
+  CallIC_CeilStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 void VectorRawLoadStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
index 030a634ce93b614ada27cc52764ae3581199f5d6..8c0a15a384896ca0c045cbea1bf1c5a5ea5c8889 100644 (file)
@@ -4602,6 +4602,15 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
 }
 
 
+void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp,
+                                        BuiltinFunctionId id, Label* miss) {
+  DCHECK(!temp.is(at));
+  lw(temp, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  lw(temp, FieldMemOperand(temp, SharedFunctionInfo::kFunctionDataOffset));
+  Branch(miss, ne, temp, Operand(Smi::FromInt(id)));
+}
+
+
 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                 Register scratch1, Register scratch2) {
   if (FLAG_native_code_counters && counter->Enabled()) {
index cdbd10331aa52d4d19361fa7c31a39992a750716..c46cfdcef732c4e67996b1906dbe5fd819c6a522 100644 (file)
@@ -1323,6 +1323,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
 
+  void BranchIfNotBuiltin(Register function, Register temp,
+                          BuiltinFunctionId id, Label* miss);
+
   struct Unresolved {
     int pc;
     uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
index 3870ff3158620ac8649a997f1b77b108976c7218..32a663460c4b7166e0728c19aa2d18e67e3a189a 100644 (file)
@@ -2931,6 +2931,155 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CallIC_RoundStub::Generate(MacroAssembler* masm) {
+  Register function = a1;
+  Register vector = a2;
+  Register slot = a3;
+
+  Register temp1 = a0;
+  Register temp2 = a4;
+  DoubleRegister double_temp1 = f12;
+  DoubleRegister double_temp2 = f14;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss);
+
+  if (arg_count() > 0) {
+    __ ld(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+    __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset));
+
+    // If the number is >0, it doesn't round to -0
+    __ Move(double_temp2, 0.0);
+    __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2);
+
+    // If the number is <-.5, it doesn't round to -0
+    __ Move(double_temp2, -.5);
+    __ BranchF64(&tail, nullptr, lt, double_temp1, double_temp2);
+
+    // +0 doesn't round to -0
+    __ FmoveHigh(temp1, double_temp1);
+    __ Branch(&tail, ne, temp1, Operand(0xffffffff80000000));
+
+    __ SmiScale(temp1, slot, kPointerSizeLog2);
+    __ Daddu(temp1, temp1, vector);
+    __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+    __ sd(temp2,
+          FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+  }
+
+  __ bind(&tail);
+  // The slow case; we need this no matter what to complete a call after a miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ stop("Unreachable");
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ Branch(&tail);
+}
+
+
+void CallIC_FloorStub::Generate(MacroAssembler* masm) {
+  Register function = a1;
+  Register vector = a2;
+  Register slot = a3;
+
+  Register temp1 = a0;
+  Register temp2 = a4;
+  DoubleRegister double_temp = f12;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss);
+
+  if (arg_count() > 0) {
+    __ ld(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+    __ ldc1(double_temp, FieldMemOperand(temp1, HeapNumber::kValueOffset));
+
+    // Only -0 floors to -0.
+    __ FmoveHigh(temp1, double_temp);
+    __ Branch(&tail, ne, temp1, Operand(0xffffffff80000000));
+    __ FmoveLow(temp1, double_temp);
+    __ Branch(&tail, ne, temp1, Operand(zero_reg));
+
+    __ SmiScale(temp1, slot, kPointerSizeLog2);
+    __ Daddu(temp1, temp1, vector);
+    __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+    __ sd(temp2,
+          FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+  }
+
+  __ bind(&tail);
+  // The slow case; we need this no matter what to complete a call after a miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ stop("Unreachable");
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ Branch(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+  Register function = a1;
+  Register vector = a2;
+  Register slot = a3;
+
+  Register temp1 = a0;
+  Register temp2 = a4;
+  DoubleRegister double_temp1 = f12;
+  DoubleRegister double_temp2 = f14;
+  Label tail, miss;
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss);
+
+  if (arg_count() > 0) {
+    __ ld(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+    __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset));
+
+    // If the number is >0, it doesn't round to -0
+    __ Move(double_temp2, 0.0);
+    __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2);
+
+    // If the number is <=-1, it doesn't round to -0
+    __ Move(double_temp2, -1.0);
+    __ BranchF64(&tail, nullptr, le, double_temp1, double_temp2);
+
+    // +0 doesn't round to -0.
+    __ FmoveHigh(temp1, double_temp1);
+    __ Branch(&tail, ne, temp1, Operand(0xffffffff80000000));
+
+    __ SmiScale(temp1, slot, kPointerSizeLog2);
+    __ Daddu(temp1, temp1, vector);
+    __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+    __ sd(temp2,
+          FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+  }
+
+  __ bind(&tail);
+  // The slow case; we need this no matter what to complete a call after a miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ stop("Unreachable");
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ Branch(&tail);
+}
+
+
 void CallICStub::Generate(MacroAssembler* masm) {
   // a1 - function
   // a3 - slot id (Smi)
@@ -3041,6 +3190,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
   __ Branch(&miss, eq, a1, Operand(a4));
 
+  // Some builtin functions require special handling; miss to the runtime.
+  __ ld(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+  __ Branch(&miss, ne, t0, Operand(Smi::FromInt(0)));
+
   // Update stats.
   __ ld(a4, FieldMemOperand(a2, with_types_offset));
   __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
@@ -4635,6 +4789,27 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, a2);
+  CallIC_RoundStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, a2);
+  CallIC_FloorStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, a2);
+  CallIC_CeilStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 void VectorRawLoadStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
index 0e7955de00d4be4d9b81802bc2ba5e8f6ab80f8b..89ed2cd88a4f586ac00ab9fb19e9e320e00d4d03 100644 (file)
@@ -4576,6 +4576,14 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
 }
 
 
+void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp,
+                                        BuiltinFunctionId id, Label* miss) {
+  ld(temp, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  ld(temp, FieldMemOperand(temp, SharedFunctionInfo::kFunctionDataOffset));
+  Branch(miss, ne, temp, Operand(Smi::FromInt(id)));
+}
+
+
 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                 Register scratch1, Register scratch2) {
   if (FLAG_native_code_counters && counter->Enabled()) {
index 2d84b40618be1651e031e585e2ce18f4b64b58ed..f45c487235765d35aa81505ed44b39e4495490a1 100644 (file)
@@ -1356,6 +1356,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
 
+  void BranchIfNotBuiltin(Register function, Register temp,
+                          BuiltinFunctionId id, Label* miss);
+
   struct Unresolved {
     int pc;
     uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
index dbe92a6accb129520b22838e09ea8a7f3ee485a1..50264808c328a805c7d4439df3bffa259d1d875d 100644 (file)
@@ -130,6 +130,8 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
       "Logger::LeaveExternal");
   Add(ExternalReference::address_of_minus_one_half().address(),
       "double_constants.minus_one_half");
+  Add(ExternalReference::address_of_minus_one().address(),
+      "double_constants.minus_one");
   Add(ExternalReference::stress_deopt_count(isolate).address(),
       "Isolate::stress_deopt_count_address()");
 
index 3cf81f8c5f16228f2fece4935adcfe77e722b446..9b03e0fc1b513f4ed79a83324eaa878751658101 100644 (file)
@@ -4,6 +4,7 @@
 
 #include "src/v8.h"
 
+#include "src/code-stubs.h"
 #include "src/ic/ic.h"
 #include "src/ic/ic-state.h"
 #include "src/objects.h"
@@ -312,7 +313,9 @@ InlineCacheState CallICNexus::StateFromFeedback() const {
   Isolate* isolate = GetIsolate();
   Object* feedback = GetFeedback();
   DCHECK(!FLAG_vector_ics ||
-         GetFeedbackExtra() == *vector()->UninitializedSentinel(isolate));
+         GetFeedbackExtra() == *vector()->UninitializedSentinel(isolate) ||
+         GetFeedbackExtra() ==
+             Smi::FromInt(CallICStub::kHasReturnedMinusZeroSentinel));
 
   if (feedback == *vector()->MegamorphicSentinel(isolate)) {
     return GENERIC;
@@ -343,6 +346,14 @@ void CallICNexus::ConfigureMonomorphicArray() {
 }
 
 
+void CallICNexus::ConfigureMonomorphicMathFunction(
+    Handle<JSFunction> function) {
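+  // Track the math builtin with a WeakCell like any monomorphic target, but
+  // reset the extra slot to uninitialized; the specialized stubs write
+  // kHasReturnedMinusZeroSentinel there once a -0 result is observed.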
+  Handle<WeakCell> new_cell = GetIsolate()->factory()->NewWeakCell(function);
+  SetFeedback(*new_cell);
+  SetFeedbackExtra(*vector()->UninitializedSentinel(GetIsolate()));
+}
+
+
 void CallICNexus::ConfigureUninitialized() {
   SetFeedback(*vector()->UninitializedSentinel(GetIsolate()),
               SKIP_WRITE_BARRIER);
index f4887a3e604c11971b49000b70c965c95e9fc884..e4d384fe5cdbaf251f2e274523148742d2dbcf97 100644 (file)
@@ -355,6 +355,7 @@ class CallICNexus : public FeedbackNexus {
   void ConfigureUninitialized();
   void ConfigureGeneric();
   void ConfigureMonomorphicArray();
+  void ConfigureMonomorphicMathFunction(Handle<JSFunction> function);
   void ConfigureMonomorphic(Handle<JSFunction> function);
 
   InlineCacheState StateFromFeedback() const override;
index 4ad66f85573e78786bb0716be81550146bdd627a..f50949508c156b3f2f642ffe12cf4a16dde113e9 100644 (file)
@@ -142,6 +142,22 @@ bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackVectorICSlot slot) {
 }
 
 
+bool TypeFeedbackOracle::CallIsBuiltinWithMinusZeroResult(
+    FeedbackVectorICSlot slot) {
+  Handle<Object> value = GetInfo(slot);
+  if (!value->IsJSFunction()) return false;
+  Handle<JSFunction> maybe_builtin(Handle<JSFunction>::cast(value));
+  if (!maybe_builtin->shared()->HasBuiltinFunctionId()) return false;
+  if (maybe_builtin->shared()->builtin_function_id() != kMathRound &&
+      maybe_builtin->shared()->builtin_function_id() != kMathFloor &&
+      maybe_builtin->shared()->builtin_function_id() != kMathCeil) {
+    return false;
+  }
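+  // The sentinel, if recorded, sits in the feedback slot immediately after
+  // the one holding the call target.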
+  return feedback_vector_->get(feedback_vector_->GetIndex(slot) + 1) ==
+         Smi::FromInt(CallICStub::kHasReturnedMinusZeroSentinel);
+}
+
+
 bool TypeFeedbackOracle::CallNewIsMonomorphic(FeedbackVectorSlot slot) {
   Handle<Object> info = GetInfo(slot);
   return FLAG_pretenuring_call_new
index 76a45dc84770d9972f416dc6f4dec2b207c2e6ff..45c485c55af5e0d18d1d92a3e7d84325d6d5c6dd 100644 (file)
@@ -28,6 +28,7 @@ class TypeFeedbackOracle: public ZoneObject {
   bool StoreIsUninitialized(TypeFeedbackId id);
   bool CallIsUninitialized(FeedbackVectorICSlot slot);
   bool CallIsMonomorphic(FeedbackVectorICSlot slot);
+  bool CallIsBuiltinWithMinusZeroResult(FeedbackVectorICSlot slot);
   bool KeyedArrayCallIsHoley(TypeFeedbackId id);
   bool CallNewIsMonomorphic(FeedbackVectorSlot slot);
 
index ab015717e83c2dd9868dc9223296a44c0f9a4cfd..3613099524304a60bdde13ee494da4d8bd4c8205 100644 (file)
@@ -540,11 +540,15 @@ void AstTyper::VisitCall(Call* expr) {
   if (expr->IsUsingCallFeedbackICSlot(isolate())) {
     FeedbackVectorICSlot slot = expr->CallFeedbackICSlot();
     is_uninitialized = oracle()->CallIsUninitialized(slot);
-    if (!expr->expression()->IsProperty() &&
-        oracle()->CallIsMonomorphic(slot)) {
-      expr->set_target(oracle()->GetCallTarget(slot));
-      Handle<AllocationSite> site = oracle()->GetCallAllocationSite(slot);
-      expr->set_allocation_site(site);
+    if (oracle()->CallIsMonomorphic(slot)) {
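+      // Feedback says this call site has produced -0; mark the AST node so
+      // optimized code is prepared for a -0 result instead of deopting.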
+      if (oracle()->CallIsBuiltinWithMinusZeroResult(slot)) {
+        expr->MarkShouldHandleMinusZeroResult();
+      }
+      if (!expr->expression()->IsProperty()) {
+        expr->set_target(oracle()->GetCallTarget(slot));
+        Handle<AllocationSite> site = oracle()->GetCallAllocationSite(slot);
+        expr->set_allocation_site(site);
+      }
     }
   }
 
index ec6d53d01c20f7f6a268e60a35e18434b1f2edbc..fba12b14a328c891cacba2c8cd0a78f3ad215663 100644 (file)
@@ -2144,6 +2144,161 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CallIC_RoundStub::Generate(MacroAssembler* masm) {
+  Register function = rdi;
+  Register vector = rbx;
+  Register slot = rdx;
+
+  Register temp = rax;
+  XMMRegister xmm_temp1 = xmm1;
+  XMMRegister xmm_temp2 = xmm0;
+  Label tail, miss;
+
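+  // The slot index arrives as a Smi; untag it for addressing the vector.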
+  __ SmiToInteger64(slot, slot);
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp, kMathRound, &miss);
+
+  if (arg_count() > 0) {
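+    // Load the first argument, skipping the return address on the stack.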
+    __ movp(temp, Operand(rsp, arg_count() * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp, map, &tail, DO_SMI_CHECK);
+
+    __ movsd(xmm_temp1, FieldOperand(temp, HeapNumber::kValueOffset));
+
+    // If the number is > 0, it doesn't round to -0.
+    __ xorps(xmm_temp2, xmm_temp2);
+    __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(above, &tail, Label::kNear);
+
+    // If the number is < -0.5, it doesn't round to -0.
+    static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5
+    __ movq(temp, minus_one_half);
+    __ movq(xmm_temp2, temp);
+    __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(below, &tail, Label::kNear);
+
+    // +0 doesn't round to -0.
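+    // movmskpd copies the double's sign bit into bit 0 of temp; if it is
+    // clear, the remaining candidate value is +0.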
+    __ movmskpd(temp, xmm_temp1);
+    __ testl(temp, Immediate(1));
+    __ j(zero, &tail, Label::kNear);
+
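+    // Record in the extra feedback slot (slot + 1) that this call site has
+    // produced a -0 result.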
+    __ Move(FieldOperand(vector, slot, times_pointer_size,
+                         FixedArray::kHeaderSize + kPointerSize),
+            Smi::FromInt(kHasReturnedMinusZeroSentinel));
+  }
+
+  __ bind(&tail);
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ int3();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ jmp(&tail);
+}
+
+
+void CallIC_FloorStub::Generate(MacroAssembler* masm) {
+  Register function = rdi;
+  Register vector = rbx;
+  Register slot = rdx;
+
+  Register temp1 = rax;
+  Register temp2 = rsi;
+  Label tail, miss;
+
+  __ SmiToInteger64(slot, slot);
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss);
+
+  if (arg_count() > 0) {
+    __ movp(temp1, Operand(rsp, arg_count() * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp1, map, &tail, DO_SMI_CHECK);
+
+    // Only -0 floors to -0.
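+    // -0.0 == 0.0 compares equal as doubles, so compare the raw bit pattern
+    // instead.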
+    __ movq(temp1, FieldOperand(temp1, HeapNumber::kValueOffset));
+    static int64_t minus_zero = V8_INT64_C(0x8000000000000000);  // -0.0
+    __ movq(temp2, minus_zero);
+    __ cmpq(temp1, temp2);
+    __ j(not_equal, &tail);
+
+    __ Move(FieldOperand(vector, slot, times_pointer_size,
+                         FixedArray::kHeaderSize + kPointerSize),
+            Smi::FromInt(kHasReturnedMinusZeroSentinel));
+  }
+
+  __ bind(&tail);
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ int3();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ jmp(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+  Register function = rdi;
+  Register vector = rbx;
+  Register slot = rdx;
+
+  Register temp = rax;
+  XMMRegister xmm_temp1 = xmm1;
+  XMMRegister xmm_temp2 = xmm0;
+  Label tail, miss;
+
+  __ SmiToInteger64(slot, slot);
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp, kMathCeil, &miss);
+
+  if (arg_count() > 0) {
+    __ movp(temp, Operand(rsp, arg_count() * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp, map, &tail, DO_SMI_CHECK);
+
+    __ movsd(xmm_temp1, FieldOperand(temp, HeapNumber::kValueOffset));
+
+    // If the number is > 0, it doesn't ceil to -0.
+    __ xorps(xmm_temp2, xmm_temp2);
+    __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(above, &tail, Label::kNear);
+
+    // If the number is <= -1, it doesn't ceil to -0.
+    static int64_t minus_one = V8_INT64_C(0xBFF0000000000000);  // -1.0
+    __ movq(temp, minus_one);
+    __ movq(xmm_temp2, temp);
+    __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(below_equal, &tail, Label::kNear);
+
+    // +0 doesn't round to -0.
+    __ movmskpd(temp, xmm_temp1);
+    __ testq(temp, Immediate(1));
+    __ j(zero, &tail, Label::kNear);
+
+    __ Move(FieldOperand(vector, slot, times_pointer_size,
+                         FixedArray::kHeaderSize + kPointerSize),
+            Smi::FromInt(kHasReturnedMinusZeroSentinel));
+  }
+
+  __ bind(&tail);
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ int3();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ jmp(&tail);
+}
+
+
 void CallICStub::Generate(MacroAssembler* masm) {
   // rdi - function
   // rdx - slot id
@@ -2253,6 +2408,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ cmpp(rdi, rcx);
   __ j(equal, &miss);
 
+  __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(rax, FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
+  __ Cmp(rax, Smi::FromInt(0));
+  __ j(not_equal, &miss);
+
   // Update stats.
   __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(1));
 
@@ -4569,6 +4729,27 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, rbx);
+  CallIC_RoundStub stub(isolate(), state());
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, rbx);
+  CallIC_FloorStub stub(isolate(), state());
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, rbx);
+  CallIC_CeilStub stub(isolate(), state());
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
index 0e70826df2297d003f28c3de27577394fd9b57a2..e6a06dbc08b5220b3230eed33de09cec662ac56b 100644 (file)
@@ -733,6 +733,15 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
 }
 
 
+void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp,
+                                        BuiltinFunctionId id, Label* miss) {
+  movp(temp, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  movp(temp, FieldOperand(temp, SharedFunctionInfo::kFunctionDataOffset));
+  Cmp(temp, Smi::FromInt(id));
+  j(not_equal, miss);
+}
+
+
 #define REG(Name) { kRegister_ ## Name ## _Code }
 
 static const Register saved_regs[] = {
index 0016d99321e86168433fa0295493d6110aa1eae5..57dcf34ec1502a6852806a00cab52d688a355663 100644 (file)
@@ -378,6 +378,8 @@ class MacroAssembler: public Assembler {
   // Store the code object for the given builtin in the target register.
   void GetBuiltinEntry(Register target, Builtins::JavaScript id);
 
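+  // Branch to |miss| if |function| is not the builtin with id |id|.
+  // Clobbers |temp|.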
+  void BranchIfNotBuiltin(Register function, Register temp,
+                          BuiltinFunctionId id, Label* miss);
 
   // ---------------------------------------------------------------------------
   // Smi tagging, untagging and operations on tagged smis.
diff --git a/test/mjsunit/math-ceil-minus-zero.js b/test/mjsunit/math-ceil-minus-zero.js
new file mode 100644 (file)
index 0000000..660497c
--- /dev/null
@@ -0,0 +1,75 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+// Test that a -0 result doesn't cause deopt loops.
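+// (Dividing 1 by the result distinguishes the zeros: 1/-0 is -Infinity,
+// while 1/0 is Infinity.)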
+function noDeoptLoop(x) {
+  return Math.ceil(x);
+}
+assertEquals(1, noDeoptLoop(0.4));
+assertEquals(1, noDeoptLoop(0.4));
+assertEquals(1, noDeoptLoop(0.4));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(1, noDeoptLoop(0.4));
+assertEquals(1, noDeoptLoop(0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertUnoptimized(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertEquals(-1.0, 1/noDeoptLoop(-1.0));
+assertEquals(Infinity, 1/noDeoptLoop(0));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.1));
+assertOptimized(noDeoptLoop);
+%ClearFunctionTypeFeedback(noDeoptLoop);
+%DeoptimizeFunction(noDeoptLoop);
+
+// Test that a ceil call site that goes megamorphic is handled correctly.
+function notCeil(x) {
+  return -x;
+}
+function testMega(f, x) {
+  return f(x);
+}
+assertEquals(8, testMega(Math.ceil, 7.4));
+assertEquals(8, testMega(Math.ceil, 7.4));
+assertEquals(8, testMega(Math.ceil, 7.4));
+assertEquals(-7.4, testMega(notCeil, 7.4));
+
+// Make sure that we can learn about ceil specialization from Crankshaft, which
+// doesn't insert soft deopts for CallICs.
+function crankCeilLearn(x) {
+  return Math.ceil(x);
+}
+%OptimizeFunctionOnNextCall(crankCeilLearn);
+assertEquals(12, crankCeilLearn(11.3));
+assertOptimized(crankCeilLearn);
+assertEquals(-Infinity, 1/crankCeilLearn(-0.0));
+assertOptimized(crankCeilLearn);
+assertEquals(-Infinity, 1/crankCeilLearn(-0.75));
diff --git a/test/mjsunit/math-floor-minus-zero.js b/test/mjsunit/math-floor-minus-zero.js
new file mode 100644 (file)
index 0000000..5488f80
--- /dev/null
@@ -0,0 +1,75 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+// Test that a -0 result doesn't cause deopt loops.
+function noDeoptLoop(x) {
+  return Math.floor(x);
+}
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertUnoptimized(noDeoptLoop);
+assertEquals(-1, 1/noDeoptLoop(-1.0));
+assertEquals(-1, 1/noDeoptLoop(-0.9));
+assertEquals(-1, 1/noDeoptLoop(-0.4));
+assertEquals(-1, 1/noDeoptLoop(-0.5));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertOptimized(noDeoptLoop);
+%ClearFunctionTypeFeedback(noDeoptLoop);
+%DeoptimizeFunction(noDeoptLoop);
+
+// Test that a floor call site that goes megamorphic is handled correctly.
+function notFloor(x) {
+  return -x;
+}
+function testMega(f, x) {
+  return f(x);
+}
+assertEquals(7, testMega(Math.floor, 7.4));
+assertEquals(7, testMega(Math.floor, 7.4));
+assertEquals(7, testMega(Math.floor, 7.4));
+assertEquals(-7.4, testMega(notFloor, 7.4));
+
+// Make sure that we can learn about floor specialization from Crankshaft,
+// which doesn't insert soft deopts for CallICs.
+function crankFloorLearn(x) {
+  return Math.floor(x);
+}
+%OptimizeFunctionOnNextCall(crankFloorLearn);
+assertEquals(12, crankFloorLearn(12.3));
+assertOptimized(crankFloorLearn);
+assertEquals(-Infinity, 1/crankFloorLearn(-0.0));
+assertOptimized(crankFloorLearn);
+assertEquals(-Infinity, 1/crankFloorLearn(-0.0));
diff --git a/test/mjsunit/math-round-minus-zero.js b/test/mjsunit/math-round-minus-zero.js
new file mode 100644 (file)
index 0000000..2b1ab2f
--- /dev/null
@@ -0,0 +1,76 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+// Test that a -0 result doesn't cause deopt loops.
+function noDeoptLoop(x) {
+  return Math.round(x);
+}
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.4));
+assertUnoptimized(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.5));
+assertEquals(-1, noDeoptLoop(-1));
+assertEquals(-1, noDeoptLoop(-0.51));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.4));
+assertOptimized(noDeoptLoop);
+%ClearFunctionTypeFeedback(noDeoptLoop);
+%DeoptimizeFunction(noDeoptLoop);
+
+// Test that a round call site that goes megamorphic is handled correctly.
+function notRound(x) {
+  return -x;
+}
+function testMega(f, x) {
+  return f(x);
+}
+assertEquals(7, testMega(Math.round, 7.4));
+assertEquals(7, testMega(Math.round, 7.4));
+assertEquals(7, testMega(Math.round, 7.4));
+assertEquals(-7.4, testMega(notRound, 7.4));
+
+// Make sure that we can learn about round specialization from Crankshaft,
+// which doesn't insert soft deopts for CallICs.
+function crankRoundLearn(x) {
+  return Math.round(x);
+}
+%OptimizeFunctionOnNextCall(crankRoundLearn);
+assertEquals(12, crankRoundLearn(12.3));
+assertOptimized(crankRoundLearn);
+assertEquals(-Infinity, 1/crankRoundLearn(-0.4));
+assertOptimized(crankRoundLearn);
+assertEquals(-Infinity, 1/crankRoundLearn(-0.4));