From f36ecaf3a4d61568ca50a20718acce7dd5da9a5f Mon Sep 17 00:00:00 2001 From: danno Date: Tue, 5 May 2015 00:55:51 -0700 Subject: [PATCH] Collect type feedback on result of Math.[round|ceil|floor] By recording invocations of these builtins that can return -0, we now learn to not emit Crankshaft code that only handles integer results, avoiding deopt loops. Review URL: https://codereview.chromium.org/1053143005 Cr-Commit-Position: refs/heads/master@{#28215} --- src/arm/code-stubs-arm.cc | 187 ++++++++++++++++++++++++++ src/arm/macro-assembler-arm.cc | 9 ++ src/arm/macro-assembler-arm.h | 3 + src/arm64/code-stubs-arm64.cc | 183 +++++++++++++++++++++++++ src/arm64/macro-assembler-arm64.cc | 9 ++ src/arm64/macro-assembler-arm64.h | 3 + src/assembler.cc | 8 ++ src/assembler.h | 1 + src/ast.h | 11 +- src/code-stubs.cc | 15 +++ src/code-stubs.h | 80 +++++++++++ src/hydrogen.cc | 67 +++++++-- src/hydrogen.h | 1 - src/ia32/code-stubs-ia32.cc | 179 ++++++++++++++++++++++++ src/ia32/macro-assembler-ia32.cc | 9 ++ src/ia32/macro-assembler-ia32.h | 3 + src/ic/ic.cc | 44 +++++- src/math.js | 8 +- src/mips/code-stubs-mips.cc | 175 ++++++++++++++++++++++++ src/mips/macro-assembler-mips.cc | 9 ++ src/mips/macro-assembler-mips.h | 3 + src/mips64/code-stubs-mips64.cc | 175 ++++++++++++++++++++++++ src/mips64/macro-assembler-mips64.cc | 8 ++ src/mips64/macro-assembler-mips64.h | 3 + src/snapshot/serialize.cc | 2 + src/type-feedback-vector.cc | 13 +- src/type-feedback-vector.h | 1 + src/type-info.cc | 16 +++ src/type-info.h | 1 + src/typing.cc | 14 +- src/x64/code-stubs-x64.cc | 181 +++++++++++++++++++++++++ src/x64/macro-assembler-x64.cc | 9 ++ src/x64/macro-assembler-x64.h | 2 + test/mjsunit/math-ceil-minus-zero.js | 75 +++++++++++ test/mjsunit/math-floor-minus-zero.js | 75 +++++++++++ test/mjsunit/math-round-minus-zero.js | 76 +++++++++++ 36 files changed, 1634 insertions(+), 24 deletions(-) create mode 100644 test/mjsunit/math-ceil-minus-zero.js create mode 100644 test/mjsunit/math-floor-minus-zero.js create mode 100644 test/mjsunit/math-round-minus-zero.js diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc index be55f0b06..f7f8270e5 100644 --- a/src/arm/code-stubs-arm.cc +++ b/src/arm/code-stubs-arm.cc @@ -2713,6 +2713,166 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) { } +void CallIC_RoundStub::Generate(MacroAssembler* masm) { + Register function = r1; + Register vector = r2; + Register slot = r3; + + Register temp1 = r0; + Register temp2 = r4; + DwVfpRegister double_temp1 = d1; + DwVfpRegister double_temp2 = d2; + Label tail, miss; + + // Ensure nobody has snuck in another function. 
+ __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss); + + if (arg_count() > 0) { + __ ldr(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); + __ sub(temp1, temp1, Operand(kHeapObjectTag)); + __ vldr(double_temp1, temp1, HeapNumber::kValueOffset); + + // If the number is >0, it doesn't round to -0 + __ Vmov(double_temp2, 0, temp1); + __ VFPCompareAndSetFlags(double_temp1, double_temp2); + __ b(gt, &tail); + + // If the number is <-.5, it doesn't round to -0 + __ Vmov(double_temp2, -.5, temp1); + __ VFPCompareAndSetFlags(double_temp1, double_temp2); + __ b(lt, &tail); + + // +0 doesn't round to -0 + __ VmovHigh(temp1, double_temp1); + __ cmp(temp1, Operand(0x80000000)); + __ b(ne, &tail); + + __ mov(temp1, Operand(slot, LSL, 1)); + __ add(temp1, temp1, vector); + __ Move(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + __ str(temp2, + FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); + } + + __ bind(&tail); + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ stop("Unreachable"); + + __ bind(&miss); + GenerateMiss(masm); + __ b(&tail); +} + + +void CallIC_FloorStub::Generate(MacroAssembler* masm) { + Register function = r1; + Register vector = r2; + Register slot = r3; + + Register temp1 = r0; + Register temp2 = r4; + DwVfpRegister double_temp = d1; + Label tail, miss; + + // Ensure nobody has snuck in another function. + __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss); + + if (arg_count() > 0) { + __ ldr(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); + __ sub(temp1, temp1, Operand(kHeapObjectTag)); + __ vldr(double_temp, temp1, HeapNumber::kValueOffset); + + // Only -0 floors to -0. + __ VmovHigh(temp1, double_temp); + __ cmp(temp1, Operand(0x80000000)); + __ b(ne, &tail); + __ VmovLow(temp1, double_temp); + __ cmp(temp1, Operand(0)); + __ b(ne, &tail); + + __ mov(temp1, Operand(slot, LSL, 1)); + __ add(temp1, temp1, vector); + __ Move(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + __ str(temp2, + FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); + } + + __ bind(&tail); + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ stop("Unreachable"); + + __ bind(&miss); + GenerateMiss(masm); + __ b(&tail); +} + + +void CallIC_CeilStub::Generate(MacroAssembler* masm) { + Register function = r1; + Register vector = r2; + Register slot = r3; + + Register temp1 = r0; + Register temp2 = r4; + DwVfpRegister double_temp1 = d1; + DwVfpRegister double_temp2 = d2; + Label tail, miss; + + // Ensure nobody has snuck in another function. 
+ __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss); + + if (arg_count() > 0) { + __ ldr(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); + __ sub(temp1, temp1, Operand(kHeapObjectTag)); + __ vldr(double_temp1, temp1, HeapNumber::kValueOffset); + + // If the number is >0, it doesn't round to -0 + __ Vmov(double_temp2, 0, temp1); + __ VFPCompareAndSetFlags(double_temp1, double_temp2); + __ b(gt, &tail); + + // If the number is <=-1, it doesn't round to -0 + __ Vmov(double_temp2, -1, temp1); + __ VFPCompareAndSetFlags(double_temp1, double_temp2); + __ b(le, &tail); + + // +0 doesn't round to -0. + __ VmovHigh(temp1, double_temp1); + __ cmp(temp1, Operand(0x80000000)); + __ b(ne, &tail); + + __ mov(temp1, Operand(slot, LSL, 1)); + __ add(temp1, temp1, vector); + __ Move(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + __ str(temp2, + FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); + } + + __ bind(&tail); + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ stop("Unreachable"); + + __ bind(&miss); + GenerateMiss(masm); + __ b(&tail); +} + + void CallICStub::Generate(MacroAssembler* masm) { // r1 - function // r3 - slot id (Smi) @@ -2823,6 +2983,12 @@ void CallICStub::Generate(MacroAssembler* masm) { __ cmp(r1, r4); __ b(eq, &miss); + // Some builtin functions require special handling, miss to the runtime. + __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset)); + __ cmp(r0, Operand(Smi::FromInt(0))); + __ b(ne, &miss); + // Update stats. 
__ ldr(r4, FieldMemOperand(r2, with_types_offset)); __ add(r4, r4, Operand(Smi::FromInt(1))); @@ -4366,6 +4532,27 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) { } +void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, r2); + CallIC_RoundStub stub(isolate(), state()); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, r2); + CallIC_FloorStub stub(isolate(), state()); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, r2); + CallIC_CeilStub stub(isolate(), state()); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + void VectorRawLoadStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); } diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc index 12108a004..2a719f540 100644 --- a/src/arm/macro-assembler-arm.cc +++ b/src/arm/macro-assembler-arm.cc @@ -2589,6 +2589,15 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { } +void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp, + BuiltinFunctionId id, Label* miss) { + ldr(temp, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); + ldr(temp, FieldMemOperand(temp, SharedFunctionInfo::kFunctionDataOffset)); + cmp(temp, Operand(Smi::FromInt(id))); + b(ne, miss); +} + + void MacroAssembler::SetCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { if (FLAG_native_code_counters && counter->Enabled()) { diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h index e6047adc4..9021c62c1 100644 --- a/src/arm/macro-assembler-arm.h +++ b/src/arm/macro-assembler-arm.h @@ -1145,6 +1145,9 @@ class MacroAssembler: public Assembler { // Store the function for the given builtin in the target register. void GetBuiltinFunction(Register target, Builtins::JavaScript id); + void BranchIfNotBuiltin(Register function, Register temp, + BuiltinFunctionId id, Label* miss); + Handle CodeObject() { DCHECK(!code_object_.is_null()); return code_object_; diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc index 299c48b1d..d3f6158f0 100644 --- a/src/arm64/code-stubs-arm64.cc +++ b/src/arm64/code-stubs-arm64.cc @@ -3112,6 +3112,162 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) { } +void CallIC_RoundStub::Generate(MacroAssembler* masm) { + Register function = x1; + Register vector = x2; + Register slot = x3; + + Register temp1 = x0; + Register temp2 = x4; + DoubleRegister double_temp1 = d1; + DoubleRegister double_temp2 = d2; + Label tail, miss; + + // Ensure nobody has snuck in another function. 
+ __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss); + + if (arg_count() > 0) { + __ Ldr(temp1, MemOperand(jssp, (arg_count() - 1) * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); + __ Sub(temp1, temp1, Operand(kHeapObjectTag)); + __ Ldr(double_temp1, MemOperand(temp1, HeapNumber::kValueOffset)); + + // If the number is >0, it doesn't round to -0 + __ Fmov(double_temp2, 0); + __ Fcmp(double_temp1, double_temp2); + __ B(gt, &tail); + + // If the number is <-.5, it doesn't round to -0 + __ Fmov(double_temp2, -.5); + __ Fcmp(double_temp1, double_temp2); + __ B(lt, &tail); + + __ Fmov(temp1, double_temp1); + __ Cmp(temp1, Operand(0x8000000000000000)); + __ B(ne, &tail); + + __ SmiUntag(temp1, slot); + __ Mov(temp1, Operand(temp1, LSL, kPointerSizeLog2)); + __ Add(temp1, temp1, vector); + __ Mov(temp2, Smi::FromInt(kHasReturnedMinusZeroSentinel)); + __ Str(temp2, + FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); + } + + __ bind(&tail); + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + __ Unreachable(); + + __ bind(&miss); + GenerateMiss(masm); + __ b(&tail); +} + + +void CallIC_FloorStub::Generate(MacroAssembler* masm) { + Register function = x1; + Register vector = x2; + Register slot = x3; + + Register temp1 = x0; + Register temp2 = x4; + DoubleRegister double_temp = d1; + Label tail, miss; + + // Ensure nobody has snuck in another function. + __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss); + + if (arg_count() > 0) { + __ Ldr(temp1, MemOperand(jssp, (arg_count() - 1) * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); + __ Sub(temp1, temp1, Operand(kHeapObjectTag)); + __ Ldr(double_temp, MemOperand(temp1, HeapNumber::kValueOffset)); + + // Only -0 floors to -0. + __ Fmov(temp1, double_temp); + __ Cmp(temp1, Operand(0x8000000000000000)); + __ B(ne, &tail); + + __ SmiUntag(temp1, slot); + __ Mov(temp1, Operand(temp1, LSL, kPointerSizeLog2)); + __ Add(temp1, temp1, vector); + __ Mov(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + __ Str(temp2, + FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); + } + + __ bind(&tail); + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + __ Unreachable(); + + __ bind(&miss); + GenerateMiss(masm); + __ b(&tail); +} + + +void CallIC_CeilStub::Generate(MacroAssembler* masm) { + Register function = x1; + Register vector = x2; + Register slot = x3; + + Register temp1 = x0; + Register temp2 = x4; + DoubleRegister double_temp1 = d1; + DoubleRegister double_temp2 = d2; + Label tail, miss; + + // Ensure nobody has snuck in another function. 
+ __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss); + + if (arg_count() > 0) { + __ Ldr(temp1, MemOperand(jssp, (arg_count() - 1) * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); + __ Sub(temp1, temp1, Operand(kHeapObjectTag)); + __ Ldr(double_temp1, MemOperand(temp1, HeapNumber::kValueOffset)); + + // If the number is positive, it doesn't ceil to -0 + __ Fmov(double_temp2, 0); + __ Fcmp(double_temp1, double_temp2); + __ B(gt, &tail); + + // If it's less or equal to 1, it doesn't ceil to -0 + __ Fmov(double_temp2, -1); + __ Fcmp(double_temp1, double_temp2); + __ B(le, &tail); + + // +Zero doesn't round to -0 + __ Fmov(temp1, double_temp1); + __ Cmp(temp1, Operand(0x8000000000000000)); + __ B(ne, &tail); + + __ SmiUntag(temp1, slot); + __ Mov(temp1, Operand(temp1, LSL, kPointerSizeLog2)); + __ Add(temp1, temp1, vector); + __ Mov(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + __ Str(temp2, + FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); + } + + __ bind(&tail); + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + __ Unreachable(); + + __ bind(&miss); + GenerateMiss(masm); + __ b(&tail); +} + + void CallICStub::Generate(MacroAssembler* masm) { ASM_LOCATION("CallICStub"); @@ -3230,6 +3386,12 @@ void CallICStub::Generate(MacroAssembler* masm) { __ Cmp(function, x5); __ B(eq, &miss); + // Some builtin functions require special handling, miss to the runtime. + __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); + __ Ldr(x0, FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset)); + __ Cmp(x0, Operand(Smi::FromInt(0))); + __ B(ne, &miss); + // Update stats. 
__ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset)); __ Adds(x4, x4, Operand(Smi::FromInt(1))); @@ -4499,6 +4661,27 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) { } +void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, x2); + CallIC_RoundStub stub(isolate(), state()); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, x2); + CallIC_FloorStub stub(isolate(), state()); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, x2); + CallIC_CeilStub stub(isolate(), state()); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + void VectorRawLoadStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); } diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc index 07e237e0b..52cc3fdd4 100644 --- a/src/arm64/macro-assembler-arm64.cc +++ b/src/arm64/macro-assembler-arm64.cc @@ -1689,6 +1689,15 @@ void MacroAssembler::GetBuiltinEntry(Register target, } +void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp, + BuiltinFunctionId id, Label* miss) { + Ldr(temp, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); + Ldr(temp, FieldMemOperand(temp, SharedFunctionInfo::kFunctionDataOffset)); + Cmp(temp, Operand(Smi::FromInt(id))); + B(ne, miss); +} + + void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h index 1160c40bf..ccb1ec957 100644 --- a/src/arm64/macro-assembler-arm64.h +++ b/src/arm64/macro-assembler-arm64.h @@ -1143,6 +1143,9 @@ class MacroAssembler : public Assembler { // Store the function for the given builtin in the target register. 
void GetBuiltinFunction(Register target, Builtins::JavaScript id); + void BranchIfNotBuiltin(Register function, Register temp, + BuiltinFunctionId id, Label* miss); + void Jump(Register target); void Jump(Address target, RelocInfo::Mode rmode); void Jump(Handle code, RelocInfo::Mode rmode); diff --git a/src/assembler.cc b/src/assembler.cc index 1464074b8..038363c7c 100644 --- a/src/assembler.cc +++ b/src/assembler.cc @@ -109,6 +109,7 @@ namespace internal { struct DoubleConstant BASE_EMBEDDED { double min_int; double one_half; +double minus_one; double minus_one_half; double negative_infinity; double the_hole_nan; @@ -950,6 +951,7 @@ void RelocInfo::Verify(Isolate* isolate) { void ExternalReference::SetUp() { double_constants.min_int = kMinInt; double_constants.one_half = 0.5; + double_constants.minus_one = -1; double_constants.minus_one_half = -0.5; double_constants.the_hole_nan = bit_cast(kHoleNanInt64); double_constants.negative_infinity = -V8_INFINITY; @@ -1274,6 +1276,12 @@ ExternalReference ExternalReference::address_of_minus_one_half() { } +ExternalReference ExternalReference::address_of_minus_one() { + return ExternalReference( + reinterpret_cast(&double_constants.minus_one)); +} + + ExternalReference ExternalReference::address_of_negative_infinity() { return ExternalReference( reinterpret_cast(&double_constants.negative_infinity)); diff --git a/src/assembler.h b/src/assembler.h index fd66e0bfd..9fe410716 100644 --- a/src/assembler.h +++ b/src/assembler.h @@ -957,6 +957,7 @@ class ExternalReference BASE_EMBEDDED { // Static variables containing common double constants. static ExternalReference address_of_min_int(); static ExternalReference address_of_one_half(); + static ExternalReference address_of_minus_one(); static ExternalReference address_of_minus_one_half(); static ExternalReference address_of_negative_infinity(); static ExternalReference address_of_the_hole_nan(); diff --git a/src/ast.h b/src/ast.h index a686f9130..0cb506b47 100644 --- a/src/ast.h +++ b/src/ast.h @@ -1818,6 +1818,13 @@ class Call final : public Expression { bit_field_ = IsUninitializedField::update(bit_field_, b); } + void MarkShouldHandleMinusZeroResult() { + bit_field_ = ShouldHandleMinusZeroResultField::update(bit_field_, true); + } + bool ShouldHandleMinusZeroResult() { + return ShouldHandleMinusZeroResultField::decode(bit_field_); + } + enum CallType { POSSIBLY_EVAL_CALL, GLOBAL_CALL, @@ -1844,7 +1851,8 @@ class Call final : public Expression { ic_slot_or_slot_(FeedbackVectorICSlot::Invalid().ToInt()), expression_(expression), arguments_(arguments), - bit_field_(IsUninitializedField::encode(false)) { + bit_field_(IsUninitializedField::encode(false) | + ShouldHandleMinusZeroResultField::encode(false)) { if (expression->IsProperty()) { expression->AsProperty()->mark_for_call(); } @@ -1862,6 +1870,7 @@ class Call final : public Expression { Handle target_; Handle allocation_site_; class IsUninitializedField : public BitField8 {}; + class ShouldHandleMinusZeroResultField : public BitField8 {}; uint8_t bit_field_; }; diff --git a/src/code-stubs.cc b/src/code-stubs.cc index 46d8342c5..045c0e785 100644 --- a/src/code-stubs.cc +++ b/src/code-stubs.cc @@ -538,6 +538,21 @@ Type* CompareNilICStub::GetInputType(Zone* zone, Handle map) { } +void CallIC_RoundStub::PrintState(std::ostream& os) const { // NOLINT + os << state() << " (Round)"; +} + + +void CallIC_FloorStub::PrintState(std::ostream& os) const { // NOLINT + os << state() << " (Floor)"; +} + + +void CallIC_CeilStub::PrintState(std::ostream& os) const { // 
NOLINT + os << state() << " (Ceil)"; +} + + void CallIC_ArrayStub::PrintState(std::ostream& os) const { // NOLINT os << state() << " (Array)"; } diff --git a/src/code-stubs.h b/src/code-stubs.h index c6767e9b8..aa9ba41b9 100644 --- a/src/code-stubs.h +++ b/src/code-stubs.h @@ -30,6 +30,9 @@ namespace internal { V(CallFunction) \ V(CallIC) \ V(CallIC_Array) \ + V(CallIC_Round) \ + V(CallIC_Floor) \ + V(CallIC_Ceil) \ V(CEntry) \ V(CompareIC) \ V(DoubleToI) \ @@ -41,6 +44,9 @@ namespace internal { V(LoadICTrampoline) \ V(CallICTrampoline) \ V(CallIC_ArrayTrampoline) \ + V(CallIC_RoundTrampoline) \ + V(CallIC_FloorTrampoline) \ + V(CallIC_CeilTrampoline) \ V(LoadIndexedInterceptor) \ V(LoadIndexedString) \ V(MathPow) \ @@ -870,6 +876,8 @@ class CallICStub: public PlatformCodeStub { return static_cast(minor_key_); } + static const int kHasReturnedMinusZeroSentinel = 1; + protected: bool CallAsMethod() const { return state().call_type() == CallICState::METHOD; @@ -892,6 +900,48 @@ class CallICStub: public PlatformCodeStub { }; +class CallIC_RoundStub : public CallICStub { + public: + CallIC_RoundStub(Isolate* isolate, const CallICState& state_in) + : CallICStub(isolate, state_in) {} + + InlineCacheState GetICState() const final { return MONOMORPHIC; } + + private: + void PrintState(std::ostream& os) const override; // NOLINT + + DEFINE_PLATFORM_CODE_STUB(CallIC_Round, CallICStub); +}; + + +class CallIC_FloorStub : public CallICStub { + public: + CallIC_FloorStub(Isolate* isolate, const CallICState& state_in) + : CallICStub(isolate, state_in) {} + + InlineCacheState GetICState() const final { return MONOMORPHIC; } + + private: + void PrintState(std::ostream& os) const override; // NOLINT + + DEFINE_PLATFORM_CODE_STUB(CallIC_Floor, CallICStub); +}; + + +class CallIC_CeilStub : public CallICStub { + public: + CallIC_CeilStub(Isolate* isolate, const CallICState& state_in) + : CallICStub(isolate, state_in) {} + + InlineCacheState GetICState() const final { return MONOMORPHIC; } + + private: + void PrintState(std::ostream& os) const override; // NOLINT + + DEFINE_PLATFORM_CODE_STUB(CallIC_Ceil, CallICStub); +}; + + class CallIC_ArrayStub: public CallICStub { public: CallIC_ArrayStub(Isolate* isolate, const CallICState& state_in) @@ -2104,6 +2154,36 @@ class CallIC_ArrayTrampolineStub : public CallICTrampolineStub { }; +class CallIC_RoundTrampolineStub : public CallICTrampolineStub { + public: + CallIC_RoundTrampolineStub(Isolate* isolate, const CallICState& state) + : CallICTrampolineStub(isolate, state) {} + + private: + DEFINE_PLATFORM_CODE_STUB(CallIC_RoundTrampoline, CallICTrampolineStub); +}; + + +class CallIC_FloorTrampolineStub : public CallICTrampolineStub { + public: + CallIC_FloorTrampolineStub(Isolate* isolate, const CallICState& state) + : CallICTrampolineStub(isolate, state) {} + + private: + DEFINE_PLATFORM_CODE_STUB(CallIC_FloorTrampoline, CallICTrampolineStub); +}; + + +class CallIC_CeilTrampolineStub : public CallICTrampolineStub { + public: + CallIC_CeilTrampolineStub(Isolate* isolate, const CallICState& state) + : CallICTrampolineStub(isolate, state) {} + + private: + DEFINE_PLATFORM_CODE_STUB(CallIC_CeilTrampoline, CallICTrampolineStub); +}; + + class MegamorphicLoadStub : public HydrogenCodeStub { public: MegamorphicLoadStub(Isolate* isolate, const LoadICState& state) diff --git a/src/hydrogen.cc b/src/hydrogen.cc index 9bdc07030..15c83c6cf 100644 --- a/src/hydrogen.cc +++ b/src/hydrogen.cc @@ -8341,8 +8341,13 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* 
expr) { if (!FLAG_fast_math) break; // Fall through if FLAG_fast_math. case kMathRound: - case kMathFround: case kMathFloor: + // If round has seen minus zero, don't inline, since that assumes + // returned value is an integer, which -0 definitely is not. + if (expr->ShouldHandleMinusZeroResult()) { + break; + } + case kMathFround: case kMathAbs: case kMathSqrt: case kMathLog: @@ -8355,6 +8360,28 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) { return true; } break; + case kMathCeil: + // If round/floor has seen minus zero, don't inline, since that assumes + // returned value is an integer, which -0 definitely is not. + if (expr->ShouldHandleMinusZeroResult()) { + break; + } + if (expr->arguments()->length() == 1) { + HValue* argument = Pop(); + Drop(2); // Receiver and function. + HValue* op = NULL; + { + NoObservableSideEffectsScope s(this); + HValue* neg_arg = + AddUncasted(graph()->GetConstantMinus1(), argument); + op = AddUncasted(neg_arg, kMathFloor); + } + HInstruction* neg_op = + NewUncasted(graph()->GetConstantMinus1(), op); + ast_context()->ReturnInstruction(neg_op, expr->id()); + return true; + } + break; case kMathImul: if (expr->arguments()->length() == 2) { HValue* right = Pop(); @@ -8448,8 +8475,13 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( if (!FLAG_fast_math) break; // Fall through if FLAG_fast_math. case kMathRound: - case kMathFround: case kMathFloor: + // If round/floor has seen minus zero, don't inline, since that assumes + // returned value is an integer, which -0 definitely is not. + if (expr->ShouldHandleMinusZeroResult()) { + break; + } + case kMathFround: case kMathAbs: case kMathSqrt: case kMathLog: @@ -8462,6 +8494,28 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( return true; } break; + case kMathCeil: + // If round/floor has seen minus zero, don't inline, since that assumes + // returned value is an integer, which -0 definitely is not. + if (expr->ShouldHandleMinusZeroResult()) { + break; + } + if (argument_count == 2) { + HValue* argument = Pop(); + Drop(2); // Receiver and function. 
+ HValue* op = NULL; + { + NoObservableSideEffectsScope s(this); + HValue* neg_arg = + AddUncasted(graph()->GetConstantMinus1(), argument); + op = AddUncasted(neg_arg, kMathFloor); + } + HInstruction* neg_op = + NewUncasted(graph()->GetConstantMinus1(), op); + ast_context()->ReturnInstruction(neg_op, expr->id()); + return true; + } + break; case kMathPow: if (argument_count == 3) { HValue* right = Pop(); @@ -12209,15 +12263,6 @@ void HOptimizedGraphBuilder::GenerateMathClz32(CallRuntime* call) { } -void HOptimizedGraphBuilder::GenerateMathFloor(CallRuntime* call) { - DCHECK(call->arguments()->length() == 1); - CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); - HValue* value = Pop(); - HInstruction* result = NewUncasted(value, kMathFloor); - return ast_context()->ReturnInstruction(result, call->id()); -} - - void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) { DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); diff --git a/src/hydrogen.h b/src/hydrogen.h index 3eb34a2d7..2b9603241 100644 --- a/src/hydrogen.h +++ b/src/hydrogen.h @@ -2225,7 +2225,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor { F(DoubleHi) \ F(DoubleLo) \ F(MathClz32) \ - F(MathFloor) \ F(MathSqrt) \ F(MathLogRT) \ /* ES6 Collections */ \ diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc index 727fae5b8..5687435cc 100644 --- a/src/ia32/code-stubs-ia32.cc +++ b/src/ia32/code-stubs-ia32.cc @@ -2265,6 +2265,157 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) { } +void CallIC_RoundStub::Generate(MacroAssembler* masm) { + Register function = edi; + Register vector = ebx; + Register slot = edx; + + Register temp = eax; + XMMRegister xmm_temp1 = xmm0; + XMMRegister xmm_temp2 = xmm1; + Label tail, miss; + + // Ensure nobody has snuck in another function. + __ BranchIfNotBuiltin(function, temp, kMathRound, &miss); + + if (arg_count() > 0) { + ExternalReference minus_one_half = + ExternalReference::address_of_minus_one_half(); + + __ mov(temp, Operand(esp, arg_count() * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp, map, &tail, DO_SMI_CHECK); + + // If the number is positive, it doesn't round to -0 + __ movsd(xmm_temp1, FieldOperand(eax, HeapNumber::kValueOffset)); + + // If the number is >0, it doesn't round to -0 + __ xorps(xmm_temp2, xmm_temp2); + __ ucomisd(xmm_temp1, xmm_temp2); + __ j(above, &tail, Label::kNear); + + // If the number is <-.5, it doesn't round to -0 + __ movsd(xmm_temp2, Operand::StaticVariable(minus_one_half)); + __ ucomisd(xmm_temp1, xmm_temp2); + __ j(below, &tail, Label::kNear); + + // The only positive result remaining is 0, it doesn't round to -0.. + __ movmskpd(temp, xmm_temp1); + __ test(temp, Immediate(1)); + __ j(zero, &tail, Label::kNear); + + __ mov(FieldOperand(vector, slot, times_half_pointer_size, + FixedArray::kHeaderSize + kPointerSize), + Immediate(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + } + + __ bind(&tail); + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ int3(); + + __ bind(&miss); + GenerateMiss(masm); + __ jmp(&tail); +} + + +void CallIC_FloorStub::Generate(MacroAssembler* masm) { + Register function = edi; + Register vector = ebx; + Register slot = edx; + + Register temp = eax; + Label tail, miss; + + // Ensure nobody has snuck in another function. 
+ __ BranchIfNotBuiltin(function, temp, kMathFloor, &miss); + + if (arg_count() > 0) { + __ mov(temp, Operand(esp, arg_count() * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp, map, &tail, DO_SMI_CHECK); + + // The only number that floors to -0 is -0. + __ cmp(FieldOperand(temp, HeapNumber::kExponentOffset), + Immediate(0x80000000)); + __ j(not_equal, &tail); + + __ cmp(FieldOperand(temp, HeapNumber::kMantissaOffset), Immediate(0)); + __ j(not_equal, &tail); + + __ mov(FieldOperand(vector, slot, times_half_pointer_size, + FixedArray::kHeaderSize + kPointerSize), + Immediate(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + } + + __ bind(&tail); + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ int3(); + + __ bind(&miss); + GenerateMiss(masm); + __ jmp(&tail); +} + + +void CallIC_CeilStub::Generate(MacroAssembler* masm) { + Register function = edi; + Register vector = ebx; + Register slot = edx; + + Register temp = eax; + XMMRegister xmm_temp1 = xmm0; + XMMRegister xmm_temp2 = xmm1; + Label tail, miss; + + // Ensure nobody has snuck in another function. + __ BranchIfNotBuiltin(function, temp, kMathCeil, &miss); + + if (arg_count() > 0) { + ExternalReference minus_one = ExternalReference::address_of_minus_one(); + + __ mov(temp, Operand(esp, arg_count() * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp, map, &tail, DO_SMI_CHECK); + + __ movsd(xmm_temp1, FieldOperand(eax, HeapNumber::kValueOffset)); + + // If the number is >0, it doesn't round to -0 + __ xorps(xmm_temp2, xmm_temp2); + __ ucomisd(xmm_temp1, xmm_temp2); + __ j(greater, &tail, Label::kNear); + + // If the number is <=-1, it doesn't round to -0 + __ movsd(xmm_temp2, Operand::StaticVariable(minus_one)); + __ ucomisd(xmm_temp1, xmm_temp2); + __ j(less_equal, &tail, Label::kNear); + + // The only positive result remaining is 0, it doesn't round to -0.. + __ movmskpd(temp, xmm_temp1); + __ test(temp, Immediate(1)); + __ j(zero, &tail, Label::kNear); + + __ mov(FieldOperand(vector, slot, times_half_pointer_size, + FixedArray::kHeaderSize + kPointerSize), + Immediate(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + } + + __ bind(&tail); + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ int3(); + + __ bind(&miss); + GenerateMiss(masm); + __ jmp(&tail); +} + + void CallICStub::Generate(MacroAssembler* masm) { // edi - function // edx - slot id @@ -2373,6 +2524,13 @@ void CallICStub::Generate(MacroAssembler* masm) { __ cmp(edi, ecx); __ j(equal, &miss); + // Make sure that the function is not Math.floor, Math.round or Math.ceil + // which have special CallICs to handle -0.0. + __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset)); + __ cmp(eax, Immediate(Smi::FromInt(0))); + __ j(not_equal, &miss); + // Update stats. 
__ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1))); @@ -4649,6 +4807,27 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) { } +void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, ebx); + CallIC_RoundStub stub(isolate(), state()); + __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, ebx); + CallIC_FloorStub stub(isolate(), state()); + __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, ebx); + CallIC_CeilStub stub(isolate(), state()); + __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (masm->isolate()->function_entry_hook() != NULL) { ProfileEntryHookStub stub(masm->isolate()); diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc index 4d599eef8..2aab2e687 100644 --- a/src/ia32/macro-assembler-ia32.cc +++ b/src/ia32/macro-assembler-ia32.cc @@ -2141,6 +2141,15 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { } +void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp, + BuiltinFunctionId id, Label* miss) { + mov(temp, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); + mov(temp, FieldOperand(temp, SharedFunctionInfo::kFunctionDataOffset)); + cmp(temp, Immediate(Smi::FromInt(id))); + j(not_equal, miss); +} + + void MacroAssembler::LoadContext(Register dst, int context_chain_length) { if (context_chain_length > 0) { // Move up the chain of contexts to the context containing the slot. diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h index 69b0c5f5a..2aeb36ee9 100644 --- a/src/ia32/macro-assembler-ia32.h +++ b/src/ia32/macro-assembler-ia32.h @@ -353,6 +353,9 @@ class MacroAssembler: public Assembler { // Store the code object for the given builtin in the target register. void GetBuiltinEntry(Register target, Builtins::JavaScript id); + void BranchIfNotBuiltin(Register function, Register temp, + BuiltinFunctionId id, Label* miss); + // Expression support // cvtsi2sd instruction only writes to the low 64-bit of dst register, which // hinders register renaming and makes dependence chains longer. So we use diff --git a/src/ic/ic.cc b/src/ic/ic.cc index 69fe5f8b2..e35fea549 100644 --- a/src/ic/ic.cc +++ b/src/ic/ic.cc @@ -2223,9 +2223,9 @@ bool CallIC::DoCustomHandler(Handle function, // Are we the array function? Handle array_function = Handle(isolate()->native_context()->array_function()); + CallICNexus* nexus = casted_nexus(); if (array_function.is_identical_to(Handle::cast(function))) { // Alter the slot. 
- CallICNexus* nexus = casted_nexus(); nexus->ConfigureMonomorphicArray(); // Vector-based ICs have a different calling convention in optimized code @@ -2247,6 +2247,48 @@ bool CallIC::DoCustomHandler(Handle function, OnTypeFeedbackChanged(isolate(), get_host(), nexus->vector(), state(), MONOMORPHIC); return true; + } else { + Handle maybe_builtin(Handle::cast(function)); + if (maybe_builtin->shared()->HasBuiltinFunctionId()) { + BuiltinFunctionId id = maybe_builtin->shared()->builtin_function_id(); + switch (id) { + case kMathRound: { + nexus->ConfigureMonomorphicMathFunction(maybe_builtin); + if (AddressIsOptimizedCode()) { + CallIC_RoundStub stub(isolate(), callic_state); + set_target(*stub.GetCode()); + } else { + CallIC_RoundTrampolineStub stub(isolate(), callic_state); + set_target(*stub.GetCode()); + } + return true; + } + case kMathFloor: + nexus->ConfigureMonomorphicMathFunction(maybe_builtin); + if (AddressIsOptimizedCode()) { + CallIC_FloorStub stub(isolate(), callic_state); + set_target(*stub.GetCode()); + } else { + CallIC_FloorTrampolineStub stub(isolate(), callic_state); + set_target(*stub.GetCode()); + } + return true; + break; + case kMathCeil: + nexus->ConfigureMonomorphicMathFunction(maybe_builtin); + if (AddressIsOptimizedCode()) { + CallIC_CeilStub stub(isolate(), callic_state); + set_target(*stub.GetCode()); + } else { + CallIC_CeilTrampolineStub stub(isolate(), callic_state); + set_target(*stub.GetCode()); + } + return true; + break; + default: + break; + } + } } return false; } diff --git a/src/math.js b/src/math.js index 1a5a1056f..13bcf0f65 100644 --- a/src/math.js +++ b/src/math.js @@ -52,7 +52,7 @@ function MathAtan2JS(y, x) { // ECMA 262 - 15.8.2.6 function MathCeil(x) { - return -%_MathFloor(-x); + return -MathFloorJS(-x); } // ECMA 262 - 15.8.2.8 @@ -62,7 +62,7 @@ function MathExp(x) { // ECMA 262 - 15.8.2.9 function MathFloorJS(x) { - return %_MathFloor(+x); + return %MathFloor(+x); } // ECMA 262 - 15.8.2.10 @@ -167,8 +167,8 @@ function MathSign(x) { // ES6 draft 09-27-13, section 20.2.2.34. function MathTrunc(x) { x = +x; - if (x > 0) return %_MathFloor(x); - if (x < 0) return -%_MathFloor(-x); + if (x > 0) return MathFloorJS(x); + if (x < 0) return -MathFloorJS(-x); // -0, 0 or NaN. return x; } diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc index 84f64e098..711e60e3c 100644 --- a/src/mips/code-stubs-mips.cc +++ b/src/mips/code-stubs-mips.cc @@ -2853,6 +2853,155 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) { } +void CallIC_RoundStub::Generate(MacroAssembler* masm) { + Register function = a1; + Register vector = a2; + Register slot = a3; + + Register temp1 = a0; + Register temp2 = t0; + DoubleRegister double_temp1 = f12; + DoubleRegister double_temp2 = f14; + Label tail, miss; + + // Ensure nobody has snuck in another function. 
+ __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss); + + if (arg_count() > 0) { + __ lw(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); + __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset)); + + // If the number is >0, it doesn't round to -0 + __ Move(double_temp2, 0.0); + __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2); + + // If the number is <-.5, it doesn't round to -0 + __ Move(double_temp2, -.5); + __ BranchF64(&tail, nullptr, lt, double_temp1, double_temp2); + + // +0 doesn't round to -0 + __ FmoveHigh(temp1, double_temp1); + __ Branch(&tail, ne, temp1, Operand(0x80000000)); + + __ sll(temp1, slot, 1); + __ Addu(temp1, temp1, vector); + __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + __ sw(temp2, + FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); + } + + __ bind(&tail); + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ stop("Unreachable"); + + __ bind(&miss); + GenerateMiss(masm); + __ Branch(&tail); +} + + +void CallIC_FloorStub::Generate(MacroAssembler* masm) { + Register function = a1; + Register vector = a2; + Register slot = a3; + + Register temp1 = a0; + Register temp2 = t0; + DoubleRegister double_temp = f12; + Label tail, miss; + + // Ensure nobody has snuck in another function. + __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss); + + if (arg_count() > 0) { + __ lw(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); + __ ldc1(double_temp, FieldMemOperand(temp1, HeapNumber::kValueOffset)); + + // Only -0 floors to -0. + __ FmoveHigh(temp1, double_temp); + __ Branch(&tail, ne, temp1, Operand(0x80000000)); + __ FmoveLow(temp1, double_temp); + __ Branch(&tail, ne, temp1, Operand(zero_reg)); + + __ sll(temp1, slot, 1); + __ Addu(temp1, temp1, vector); + __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + __ sw(temp2, + FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); + } + + __ bind(&tail); + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ stop("Unreachable"); + + __ bind(&miss); + GenerateMiss(masm); + __ Branch(&tail); +} + + +void CallIC_CeilStub::Generate(MacroAssembler* masm) { + Register function = a1; + Register vector = a2; + Register slot = a3; + + Register temp1 = a0; + Register temp2 = t0; + DoubleRegister double_temp1 = f12; + DoubleRegister double_temp2 = f14; + Label tail, miss; + + // Ensure nobody has snuck in another function. 
+ __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss); + + if (arg_count() > 0) { + __ lw(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); + __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset)); + + // If the number is >0, it doesn't round to -0 + __ Move(double_temp2, 0.0); + __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2); + + // If the number is <=-1, it doesn't round to -0 + __ Move(double_temp2, -1.0); + __ BranchF64(&tail, nullptr, le, double_temp1, double_temp2); + + // +0 doesn't round to -0. + __ FmoveHigh(temp1, double_temp1); + __ Branch(&tail, ne, temp1, Operand(0x80000000)); + + __ sll(temp1, slot, 1); + __ Addu(temp1, temp1, vector); + __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + __ sw(temp2, + FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); + } + + __ bind(&tail); + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ stop("Unreachable"); + + __ bind(&miss); + GenerateMiss(masm); + __ Branch(&tail); +} + + void CallICStub::Generate(MacroAssembler* masm) { // a1 - function // a3 - slot id (Smi) @@ -2963,6 +3112,11 @@ void CallICStub::Generate(MacroAssembler* masm) { __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0); __ Branch(&miss, eq, a1, Operand(t0)); + // Some builtin functions require special handling, miss to the runtime. + __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); + __ Branch(&miss, ne, t0, Operand(Smi::FromInt(0))); + // Update stats. 
__ lw(t0, FieldMemOperand(a2, with_types_offset)); __ Addu(t0, t0, Operand(Smi::FromInt(1))); @@ -4591,6 +4745,27 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) { } +void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, a2); + CallIC_RoundStub stub(isolate(), state()); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, a2); + CallIC_FloorStub stub(isolate(), state()); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, a2); + CallIC_CeilStub stub(isolate(), state()); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + void VectorRawLoadStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); } diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc index 030a634ce..8c0a15a38 100644 --- a/src/mips/macro-assembler-mips.cc +++ b/src/mips/macro-assembler-mips.cc @@ -4602,6 +4602,15 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { } +void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp, + BuiltinFunctionId id, Label* miss) { + DCHECK(!temp.is(at)); + lw(temp, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); + lw(temp, FieldMemOperand(temp, SharedFunctionInfo::kFunctionDataOffset)); + Branch(miss, ne, temp, Operand(Smi::FromInt(id))); +} + + void MacroAssembler::SetCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { if (FLAG_native_code_counters && counter->Enabled()) { diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h index cdbd10331..c46cfdcef 100644 --- a/src/mips/macro-assembler-mips.h +++ b/src/mips/macro-assembler-mips.h @@ -1323,6 +1323,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT // Store the function for the given builtin in the target register. void GetBuiltinFunction(Register target, Builtins::JavaScript id); + void BranchIfNotBuiltin(Register function, Register temp, + BuiltinFunctionId id, Label* miss); + struct Unresolved { int pc; uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders. diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc index 3870ff315..32a663460 100644 --- a/src/mips64/code-stubs-mips64.cc +++ b/src/mips64/code-stubs-mips64.cc @@ -2931,6 +2931,155 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) { } +void CallIC_RoundStub::Generate(MacroAssembler* masm) { + Register function = a1; + Register vector = a2; + Register slot = a3; + + Register temp1 = a0; + Register temp2 = a4; + DoubleRegister double_temp1 = f12; + DoubleRegister double_temp2 = f14; + Label tail, miss; + + // Ensure nobody has snuck in another function. 
+ __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss); + + if (arg_count() > 0) { + __ ld(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); + __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset)); + + // If the number is >0, it doesn't round to -0 + __ Move(double_temp2, 0.0); + __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2); + + // If the number is <-.5, it doesn't round to -0 + __ Move(double_temp2, -.5); + __ BranchF64(&tail, nullptr, lt, double_temp1, double_temp2); + + // +0 doesn't round to -0 + __ FmoveHigh(temp1, double_temp1); + __ Branch(&tail, ne, temp1, Operand(0xffffffff80000000)); + + __ SmiScale(temp1, slot, kPointerSizeLog2); + __ Daddu(temp1, temp1, vector); + __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + __ sd(temp2, + FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); + } + + __ bind(&tail); + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ stop("Unreachable"); + + __ bind(&miss); + GenerateMiss(masm); + __ Branch(&tail); +} + + +void CallIC_FloorStub::Generate(MacroAssembler* masm) { + Register function = a1; + Register vector = a2; + Register slot = a3; + + Register temp1 = a0; + Register temp2 = a4; + DoubleRegister double_temp = f12; + Label tail, miss; + + // Ensure nobody has snuck in another function. + __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss); + + if (arg_count() > 0) { + __ ld(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); + __ ldc1(double_temp, FieldMemOperand(temp1, HeapNumber::kValueOffset)); + + // Only -0 floors to -0. + __ FmoveHigh(temp1, double_temp); + __ Branch(&tail, ne, temp1, Operand(0xffffffff80000000)); + __ FmoveLow(temp1, double_temp); + __ Branch(&tail, ne, temp1, Operand(zero_reg)); + + __ SmiScale(temp1, slot, kPointerSizeLog2); + __ Daddu(temp1, temp1, vector); + __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + __ sd(temp2, + FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); + } + + __ bind(&tail); + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ stop("Unreachable"); + + __ bind(&miss); + GenerateMiss(masm); + __ Branch(&tail); +} + + +void CallIC_CeilStub::Generate(MacroAssembler* masm) { + Register function = a1; + Register vector = a2; + Register slot = a3; + + Register temp1 = a0; + Register temp2 = a4; + DoubleRegister double_temp1 = f12; + DoubleRegister double_temp2 = f14; + Label tail, miss; + + // Ensure nobody has snuck in another function. 
+ __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss); + + if (arg_count() > 0) { + __ ld(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); + __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset)); + + // If the number is >0, it doesn't round to -0 + __ Move(double_temp2, 0.0); + __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2); + + // If the number is <=-1, it doesn't round to -0 + __ Move(double_temp2, -1.0); + __ BranchF64(&tail, nullptr, le, double_temp1, double_temp2); + + // +0 doesn't round to -0. + __ FmoveHigh(temp1, double_temp1); + __ Branch(&tail, ne, temp1, Operand(0xffffffff80000000)); + + __ SmiScale(temp1, slot, kPointerSizeLog2); + __ Daddu(temp1, temp1, vector); + __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); + __ sd(temp2, + FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); + } + + __ bind(&tail); + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ stop("Unreachable"); + + __ bind(&miss); + GenerateMiss(masm); + __ Branch(&tail); +} + + void CallICStub::Generate(MacroAssembler* masm) { // a1 - function // a3 - slot id (Smi) @@ -3041,6 +3190,11 @@ void CallICStub::Generate(MacroAssembler* masm) { __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4); __ Branch(&miss, eq, a1, Operand(a4)); + // Some builtin functions require special handling, miss to the runtime. + __ ld(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); + __ Branch(&miss, ne, t0, Operand(Smi::FromInt(0))); + // Update stats. 
__ ld(a4, FieldMemOperand(a2, with_types_offset)); __ Daddu(a4, a4, Operand(Smi::FromInt(1))); @@ -4635,6 +4789,27 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) { } +void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, a2); + CallIC_RoundStub stub(isolate(), state()); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, a2); + CallIC_FloorStub stub(isolate(), state()); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, a2); + CallIC_CeilStub stub(isolate(), state()); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + void VectorRawLoadStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); } diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc index 0e7955de0..89ed2cd88 100644 --- a/src/mips64/macro-assembler-mips64.cc +++ b/src/mips64/macro-assembler-mips64.cc @@ -4576,6 +4576,14 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { } +void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp, + BuiltinFunctionId id, Label* miss) { + ld(temp, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); + ld(temp, FieldMemOperand(temp, SharedFunctionInfo::kFunctionDataOffset)); + Branch(miss, ne, temp, Operand(Smi::FromInt(id))); +} + + void MacroAssembler::SetCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { if (FLAG_native_code_counters && counter->Enabled()) { diff --git a/src/mips64/macro-assembler-mips64.h b/src/mips64/macro-assembler-mips64.h index 2d84b4061..f45c48723 100644 --- a/src/mips64/macro-assembler-mips64.h +++ b/src/mips64/macro-assembler-mips64.h @@ -1356,6 +1356,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT // Store the function for the given builtin in the target register. void GetBuiltinFunction(Register target, Builtins::JavaScript id); + void BranchIfNotBuiltin(Register function, Register temp, + BuiltinFunctionId id, Label* miss); + struct Unresolved { int pc; uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders. 
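For reference (an illustrative note, not part of the patch): the guards emitted by the stubs above mirror exactly which inputs make these builtins produce -0. Math.round returns -0 for -0 and for values in [-0.5, 0), Math.floor only for -0 itself, and Math.ceil for -0 and values in (-1, 0). A minimal JavaScript illustration, using the 1/x sign trick to distinguish -0 from +0:

  // Math.round returns -0 for -0 and for -0.5 <= x < 0.
  1 / Math.round(-0.4);   // -Infinity
  1 / Math.round(-0.5);   // -Infinity (half-way values round toward +infinity)
  1 / Math.round(-0.6);   // -1 (result is -1, not -0)

  // Math.floor returns -0 only for -0 itself.
  1 / Math.floor(-0);     // -Infinity
  1 / Math.floor(-0.1);   // -1

  // Math.ceil returns -0 for -0 and for -1 < x < 0, consistent with the
  // ceil(x) === -floor(-x) identity used by the hydrogen.cc inlining and
  // by MathCeil in src/math.js in this patch.
  1 / Math.ceil(-0.5);    // -Infinity
  1 / Math.ceil(-1);      // -1

These are the cases for which the CallIC stubs write kHasReturnedMinusZeroSentinel into the feedback vector slot, which in turn makes the graph builder skip the integer-result inlining.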
diff --git a/src/snapshot/serialize.cc b/src/snapshot/serialize.cc index dbe92a6ac..50264808c 100644 --- a/src/snapshot/serialize.cc +++ b/src/snapshot/serialize.cc @@ -130,6 +130,8 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) { "Logger::LeaveExternal"); Add(ExternalReference::address_of_minus_one_half().address(), "double_constants.minus_one_half"); + Add(ExternalReference::address_of_minus_one().address(), + "double_constants.minus_one"); Add(ExternalReference::stress_deopt_count(isolate).address(), "Isolate::stress_deopt_count_address()"); diff --git a/src/type-feedback-vector.cc b/src/type-feedback-vector.cc index 3cf81f8c5..9b03e0fc1 100644 --- a/src/type-feedback-vector.cc +++ b/src/type-feedback-vector.cc @@ -4,6 +4,7 @@ #include "src/v8.h" +#include "src/code-stubs.h" #include "src/ic/ic.h" #include "src/ic/ic-state.h" #include "src/objects.h" @@ -312,7 +313,9 @@ InlineCacheState CallICNexus::StateFromFeedback() const { Isolate* isolate = GetIsolate(); Object* feedback = GetFeedback(); DCHECK(!FLAG_vector_ics || - GetFeedbackExtra() == *vector()->UninitializedSentinel(isolate)); + GetFeedbackExtra() == *vector()->UninitializedSentinel(isolate) || + GetFeedbackExtra() == + Smi::FromInt(CallICStub::kHasReturnedMinusZeroSentinel)); if (feedback == *vector()->MegamorphicSentinel(isolate)) { return GENERIC; @@ -343,6 +346,14 @@ void CallICNexus::ConfigureMonomorphicArray() { } +void CallICNexus::ConfigureMonomorphicMathFunction( + Handle function) { + Handle new_cell = GetIsolate()->factory()->NewWeakCell(function); + SetFeedback(*new_cell); + SetFeedbackExtra(*vector()->UninitializedSentinel(GetIsolate())); +} + + void CallICNexus::ConfigureUninitialized() { SetFeedback(*vector()->UninitializedSentinel(GetIsolate()), SKIP_WRITE_BARRIER); diff --git a/src/type-feedback-vector.h b/src/type-feedback-vector.h index f4887a3e6..e4d384fe5 100644 --- a/src/type-feedback-vector.h +++ b/src/type-feedback-vector.h @@ -355,6 +355,7 @@ class CallICNexus : public FeedbackNexus { void ConfigureUninitialized(); void ConfigureGeneric(); void ConfigureMonomorphicArray(); + void ConfigureMonomorphicMathFunction(Handle function); void ConfigureMonomorphic(Handle function); InlineCacheState StateFromFeedback() const override; diff --git a/src/type-info.cc b/src/type-info.cc index 4ad66f855..f50949508 100644 --- a/src/type-info.cc +++ b/src/type-info.cc @@ -142,6 +142,22 @@ bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackVectorICSlot slot) { } +bool TypeFeedbackOracle::CallIsBuiltinWithMinusZeroResult( + FeedbackVectorICSlot slot) { + Handle value = GetInfo(slot); + if (!value->IsJSFunction()) return false; + Handle maybe_round(Handle::cast(value)); + if (!maybe_round->shared()->HasBuiltinFunctionId()) return false; + if (maybe_round->shared()->builtin_function_id() != kMathRound && + maybe_round->shared()->builtin_function_id() != kMathFloor && + maybe_round->shared()->builtin_function_id() != kMathCeil) { + return false; + } + return feedback_vector_->get(feedback_vector_->GetIndex(slot) + 1) == + Smi::FromInt(CallICStub::kHasReturnedMinusZeroSentinel); +} + + bool TypeFeedbackOracle::CallNewIsMonomorphic(FeedbackVectorSlot slot) { Handle info = GetInfo(slot); return FLAG_pretenuring_call_new diff --git a/src/type-info.h b/src/type-info.h index 76a45dc84..45c485c55 100644 --- a/src/type-info.h +++ b/src/type-info.h @@ -28,6 +28,7 @@ class TypeFeedbackOracle: public ZoneObject { bool StoreIsUninitialized(TypeFeedbackId id); bool CallIsUninitialized(FeedbackVectorICSlot slot); bool 
CallIsMonomorphic(FeedbackVectorICSlot slot); + bool CallIsBuiltinWithMinusZeroResult(FeedbackVectorICSlot slot); bool KeyedArrayCallIsHoley(TypeFeedbackId id); bool CallNewIsMonomorphic(FeedbackVectorSlot slot); diff --git a/src/typing.cc b/src/typing.cc index ab015717e..361309952 100644 --- a/src/typing.cc +++ b/src/typing.cc @@ -540,11 +540,15 @@ void AstTyper::VisitCall(Call* expr) { if (expr->IsUsingCallFeedbackICSlot(isolate())) { FeedbackVectorICSlot slot = expr->CallFeedbackICSlot(); is_uninitialized = oracle()->CallIsUninitialized(slot); - if (!expr->expression()->IsProperty() && - oracle()->CallIsMonomorphic(slot)) { - expr->set_target(oracle()->GetCallTarget(slot)); - Handle site = oracle()->GetCallAllocationSite(slot); - expr->set_allocation_site(site); + if (oracle()->CallIsMonomorphic(slot)) { + if (oracle()->CallIsBuiltinWithMinusZeroResult(slot)) { + expr->MarkShouldHandleMinusZeroResult(); + } + if (!expr->expression()->IsProperty()) { + expr->set_target(oracle()->GetCallTarget(slot)); + Handle site = oracle()->GetCallAllocationSite(slot); + expr->set_allocation_site(site); + } } } diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc index ec6d53d01..fba12b14a 100644 --- a/src/x64/code-stubs-x64.cc +++ b/src/x64/code-stubs-x64.cc @@ -2144,6 +2144,161 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) { } +void CallIC_RoundStub::Generate(MacroAssembler* masm) { + Register function = rdi; + Register vector = rbx; + Register slot = rdx; + + Register temp = rax; + XMMRegister xmm_temp1 = xmm1; + XMMRegister xmm_temp2 = xmm0; + Label tail, miss; + + __ SmiToInteger64(slot, slot); + + // Ensure nobody has snuck in another function. + __ BranchIfNotBuiltin(function, temp, kMathRound, &miss); + + if (arg_count() > 0) { + __ movp(temp, Operand(rsp, arg_count() * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp, map, &tail, DO_SMI_CHECK); + + __ movsd(xmm_temp1, FieldOperand(temp, HeapNumber::kValueOffset)); + + // If the number is >0, it doesn't round to -0 + __ xorps(xmm_temp2, xmm_temp2); + __ ucomisd(xmm_temp1, xmm_temp2); + __ j(above, &tail, Label::kNear); + + // If the number is <-.5, it doesn't round to -0 + static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5 + __ movq(temp, minus_one_half); + __ movq(xmm_temp2, temp); + __ ucomisd(xmm_temp1, xmm_temp2); + __ j(below, &tail, Label::kNear); + + // +0 doesn't round to -0 + __ movmskpd(temp, xmm_temp1); + __ testl(temp, Immediate(1)); + __ j(zero, &tail, Label::kNear); + + __ Move(FieldOperand(vector, slot, times_pointer_size, + FixedArray::kHeaderSize + kPointerSize), + Smi::FromInt(kHasReturnedMinusZeroSentinel)); + } + + __ bind(&tail); + CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); + + // Unreachable. + __ int3(); + + __ bind(&miss); + GenerateMiss(masm); + __ jmp(&tail); +} + + +void CallIC_FloorStub::Generate(MacroAssembler* masm) { + Register function = rdi; + Register vector = rbx; + Register slot = rdx; + + Register temp1 = rax; + Register temp2 = rsi; + Label tail, miss; + + __ SmiToInteger64(slot, slot); + + // Ensure nobody has snuck in another function. + __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss); + + if (arg_count() > 0) { + __ movp(temp1, Operand(rsp, arg_count() * kPointerSize)); + Handle map = isolate()->factory()->heap_number_map(); + __ CheckMap(temp1, map, &tail, DO_SMI_CHECK); + + // Only -0 floors to -0. 
+    __ movq(temp1, FieldOperand(temp1, HeapNumber::kValueOffset));
+    static int64_t minus_zero = V8_INT64_C(0x8000000000000000);  // -0.0
+    __ movq(temp2, minus_zero);
+    __ cmpq(temp1, temp2);
+    __ j(not_equal, &tail);
+
+    __ Move(FieldOperand(vector, slot, times_pointer_size,
+                         FixedArray::kHeaderSize + kPointerSize),
+            Smi::FromInt(kHasReturnedMinusZeroSentinel));
+  }
+
+  __ bind(&tail);
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ int3();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ jmp(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+  Register function = rdi;
+  Register vector = rbx;
+  Register slot = rdx;
+
+  Register temp = rax;
+  XMMRegister xmm_temp1 = xmm1;
+  XMMRegister xmm_temp2 = xmm0;
+  Label tail, miss;
+
+  __ SmiToInteger64(slot, slot);
+
+  // Ensure nobody has snuck in another function.
+  __ BranchIfNotBuiltin(function, temp, kMathCeil, &miss);
+
+  if (arg_count() > 0) {
+    __ movp(temp, Operand(rsp, arg_count() * kPointerSize));
+    Handle<Map> map = isolate()->factory()->heap_number_map();
+    __ CheckMap(temp, map, &tail, DO_SMI_CHECK);
+
+    __ movsd(xmm_temp1, FieldOperand(rax, HeapNumber::kValueOffset));
+
+    // If the number is >0, it doesn't round to -0
+    __ xorps(xmm_temp2, xmm_temp2);
+    __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(greater, &tail, Label::kNear);
+
+    // If the number is <=-1, it doesn't round to -0
+    static int64_t minus_one = V8_INT64_C(0xbff0000000000000);  // -1
+    __ movq(temp, minus_one);
+    __ movq(xmm_temp2, temp);
+    __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(less_equal, &tail, Label::kNear);
+
+    // +0 doesn't round to -0.
+    __ movmskpd(temp, xmm_temp1);
+    __ testq(temp, Immediate(1));
+    __ j(zero, &tail, Label::kNear);
+
+    __ Move(FieldOperand(vector, slot, times_pointer_size,
+                         FixedArray::kHeaderSize + kPointerSize),
+            Smi::FromInt(kHasReturnedMinusZeroSentinel));
+  }
+
+  __ bind(&tail);
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ int3();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+  __ jmp(&tail);
+}
+
+
 void CallICStub::Generate(MacroAssembler* masm) {
   // rdi - function
   // rdx - slot id
@@ -2253,6 +2408,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ cmpp(rdi, rcx);
   __ j(equal, &miss);
 
+  __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(rax, FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
+  __ Cmp(rax, Smi::FromInt(0));
+  __ j(not_equal, &miss);
+
   // Update stats.
__ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(1)); @@ -4569,6 +4729,27 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) { } +void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, rbx); + CallIC_RoundStub stub(isolate(), state()); + __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, rbx); + CallIC_FloorStub stub(isolate(), state()); + __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) { + EmitLoadTypeFeedbackVector(masm, rbx); + CallIC_CeilStub stub(isolate(), state()); + __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (masm->isolate()->function_entry_hook() != NULL) { ProfileEntryHookStub stub(masm->isolate()); diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc index 0e70826df..e6a06dbc0 100644 --- a/src/x64/macro-assembler-x64.cc +++ b/src/x64/macro-assembler-x64.cc @@ -733,6 +733,15 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { } +void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp, + BuiltinFunctionId id, Label* miss) { + movp(temp, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); + movp(temp, FieldOperand(temp, SharedFunctionInfo::kFunctionDataOffset)); + Cmp(temp, Smi::FromInt(id)); + j(not_equal, miss); +} + + #define REG(Name) { kRegister_ ## Name ## _Code } static const Register saved_regs[] = { diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h index 0016d9932..57dcf34ec 100644 --- a/src/x64/macro-assembler-x64.h +++ b/src/x64/macro-assembler-x64.h @@ -378,6 +378,8 @@ class MacroAssembler: public Assembler { // Store the code object for the given builtin in the target register. void GetBuiltinEntry(Register target, Builtins::JavaScript id); + void BranchIfNotBuiltin(Register function, Register temp, + BuiltinFunctionId id, Label* miss); // --------------------------------------------------------------------------- // Smi tagging, untagging and operations on tagged smis. diff --git a/test/mjsunit/math-ceil-minus-zero.js b/test/mjsunit/math-ceil-minus-zero.js new file mode 100644 index 000000000..660497c59 --- /dev/null +++ b/test/mjsunit/math-ceil-minus-zero.js @@ -0,0 +1,75 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+// Test that a -0 result doesn't cause deopt loops
+function noDeoptLoop(x) {
+  return Math.ceil(x);
+}
+assertEquals(1, noDeoptLoop(0.4));
+assertEquals(1, noDeoptLoop(0.4));
+assertEquals(1, noDeoptLoop(0.4));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(1, noDeoptLoop(0.4));
+assertEquals(1, noDeoptLoop(0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertUnoptimized(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertEquals(-1.0, 1/noDeoptLoop(-1.0));
+assertEquals(Infinity, 1/noDeoptLoop(0));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.1));
+assertOptimized(noDeoptLoop);
+%ClearFunctionTypeFeedback(noDeoptLoop);
+%DeoptimizeFunction(noDeoptLoop);
+
+// Test that ceil that goes megamorphic is handled correctly.
+function notCeil(x) {
+  return -x;
+}
+function testMega(f, x) {
+  return f(x);
+}
+assertEquals(8, testMega(Math.ceil, 7.4));
+assertEquals(8, testMega(Math.ceil, 7.4));
+assertEquals(8, testMega(Math.ceil, 7.4));
+assertEquals(-7.4, testMega(notCeil, 7.4));
+
+// Make sure that we can learn about ceil specialization from Crankshaft, which
+// doesn't insert soft deopts for CallICs.
+function crankCeilLearn(x) {
+  return Math.ceil(x);
+}
+%OptimizeFunctionOnNextCall(crankCeilLearn);
+assertEquals(12, crankCeilLearn(11.3));
+assertOptimized(crankCeilLearn);
+assertEquals(-Infinity, 1/crankCeilLearn(-0.0));
+assertOptimized(crankCeilLearn);
+assertEquals(-Infinity, 1/crankCeilLearn(-0.75));
diff --git a/test/mjsunit/math-floor-minus-zero.js b/test/mjsunit/math-floor-minus-zero.js
new file mode 100644
index 000000000..5488f80a2
--- /dev/null
+++ b/test/mjsunit/math-floor-minus-zero.js
@@ -0,0 +1,75 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+// Test that a -0 result doesn't cause deopt loops
+function noDeoptLoop(x) {
+  return Math.floor(x);
+}
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertUnoptimized(noDeoptLoop);
+assertEquals(-1, 1/noDeoptLoop(-1.0));
+assertEquals(-1, 1/noDeoptLoop(-0.9));
+assertEquals(-1, 1/noDeoptLoop(-0.4));
+assertEquals(-1, 1/noDeoptLoop(-0.5));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertOptimized(noDeoptLoop);
+%ClearFunctionTypeFeedback(noDeoptLoop);
+%DeoptimizeFunction(noDeoptLoop);
+
+// Test that floor that goes megamorphic is handled correctly.
+function notFloor(x) {
+  return -x;
+}
+function testMega(f, x) {
+  return f(x);
+}
+assertEquals(7, testMega(Math.floor, 7.4));
+assertEquals(7, testMega(Math.floor, 7.4));
+assertEquals(7, testMega(Math.floor, 7.4));
+assertEquals(-7.4, testMega(notFloor, 7.4));
+
+// Make sure that we can learn about floor specialization from Crankshaft, which
+// doesn't insert soft deopts for CallICs.
+function crankFloorLearn(x) {
+  return Math.floor(x);
+}
+%OptimizeFunctionOnNextCall(crankFloorLearn);
+assertEquals(12, crankFloorLearn(12.3));
+assertOptimized(crankFloorLearn);
+assertEquals(-Infinity, 1/crankFloorLearn(-0.0));
+assertOptimized(crankFloorLearn);
+assertEquals(-Infinity, 1/crankFloorLearn(-0.0));
diff --git a/test/mjsunit/math-round-minus-zero.js b/test/mjsunit/math-round-minus-zero.js
new file mode 100644
index 000000000..2b1ab2fd7
--- /dev/null
+++ b/test/mjsunit/math-round-minus-zero.js
@@ -0,0 +1,76 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+// Test that a -0 result doesn't cause deopt loops
+function noDeoptLoop(x) {
+  return Math.round(x);
+}
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.4));
+assertUnoptimized(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.5));
+assertEquals(-1, noDeoptLoop(-1));
+assertEquals(-1, noDeoptLoop(-0.51));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.4));
+assertOptimized(noDeoptLoop);
+%ClearFunctionTypeFeedback(noDeoptLoop);
+%DeoptimizeFunction(noDeoptLoop);
+
+// Test that round that goes megamorphic is handled correctly.
+function notRound(x) {
+  return -x;
+}
+function testMega(f, x) {
+  return f(x);
+}
+assertEquals(7, testMega(Math.round, 7.4));
+assertEquals(7, testMega(Math.round, 7.4));
+assertEquals(7, testMega(Math.round, 7.4));
+assertEquals(-7.4, testMega(notRound, 7.4));
+
+// Make sure that we can learn about round specialization from Crankshaft, which
+// doesn't insert soft deopts for CallICs.
+function crankRoundLearn(x) {
+  return Math.round(x);
+}
+%OptimizeFunctionOnNextCall(crankRoundLearn);
+assertEquals(12, crankRoundLearn(12.3));
+assertOptimized(crankRoundLearn);
+assertEquals(-Infinity, 1/crankRoundLearn(-0.4));
+assertOptimized(crankRoundLearn);
+assertEquals(-Infinity, 1/crankRoundLearn(-0.4));
-- 
2.34.1
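
For reference, the minus-zero cases that the stubs and tests above guard against can be summarized in a few lines of plain JavaScript. This is an illustrative sketch only, not part of the patch; the assert helpers below are ad-hoc assumptions (mjsunit's assertEquals would work equally well), and the 1/x comparison is the same trick the tests use to distinguish -0 from +0.

// Ad-hoc helpers, runnable in any ES5+ shell (assumption: not mjsunit functions).
function assertMinusZero(x) {
  if (x !== 0 || 1 / x !== -Infinity) throw new Error(x + " is not -0");
}
function assertNotMinusZero(x) {
  if (x === 0 && 1 / x === -Infinity) throw new Error("unexpected -0");
}

// Math.round produces -0 for inputs in [-0.5, -0], matching the RoundStub
// checks: ">0 doesn't round to -0", "<-.5 doesn't round to -0", "+0 doesn't".
assertMinusZero(Math.round(-0.0));
assertMinusZero(Math.round(-0.4));
assertMinusZero(Math.round(-0.5));
assertNotMinusZero(Math.round(-0.51));  // rounds to -1
assertNotMinusZero(Math.round(0.4));    // rounds to +0

// Math.floor produces -0 only for -0 itself, matching "Only -0 floors to -0".
assertMinusZero(Math.floor(-0.0));
assertNotMinusZero(Math.floor(-0.1));   // floors to -1
assertNotMinusZero(Math.floor(0.4));    // floors to +0

// Math.ceil produces -0 for inputs in (-1, -0], matching the CeilStub checks:
// ">0 doesn't round to -0", "<=-1 doesn't round to -0", "+0 doesn't".
assertMinusZero(Math.ceil(-0.0));
assertMinusZero(Math.ceil(-0.9));
assertNotMinusZero(Math.ceil(-1.0));    // stays -1
assertNotMinusZero(Math.ceil(0.4));     // ceils to 1

Once a call site observes any of these -0 results, the IC records the kHasReturnedMinusZeroSentinel in the feedback slot, and Crankshaft keeps the double (rather than integer-only) result representation for that call, avoiding the deopt loop described in the commit message.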