}
+void CallIC_RoundStub::Generate(MacroAssembler* masm) {
+ Register function = r1;
+ Register vector = r2;
+ Register slot = r3;
+
+ Register temp1 = r0;
+ Register temp2 = r4;
+ DwVfpRegister double_temp1 = d1;
+ DwVfpRegister double_temp2 = d2;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss);
+
+ if (arg_count() > 0) {
+ __ ldr(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+ __ sub(temp1, temp1, Operand(kHeapObjectTag));
+ __ vldr(double_temp1, temp1, HeapNumber::kValueOffset);
+
+ // If the number is >0, it doesn't round to -0
+ __ Vmov(double_temp2, 0, temp1);
+ __ VFPCompareAndSetFlags(double_temp1, double_temp2);
+ __ b(gt, &tail);
+
+ // If the number is <-.5, it doesn't round to -0
+ __ Vmov(double_temp2, -.5, temp1);
+ __ VFPCompareAndSetFlags(double_temp1, double_temp2);
+ __ b(lt, &tail);
+
+ // +0 doesn't round to -0
+ __ VmovHigh(temp1, double_temp1);
+ __ cmp(temp1, Operand(0x80000000));
+ __ b(ne, &tail);
+
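+  // Record in the feedback vector's extra slot that a -0 result was observed.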
+ __ mov(temp1, Operand(slot, LSL, 1));
+ __ add(temp1, temp1, vector);
+ __ Move(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ __ str(temp2,
+ FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+ }
+
+ __ bind(&tail);
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unreachable");
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ b(&tail);
+}
+
+
+void CallIC_FloorStub::Generate(MacroAssembler* masm) {
+ Register function = r1;
+ Register vector = r2;
+ Register slot = r3;
+
+ Register temp1 = r0;
+ Register temp2 = r4;
+ DwVfpRegister double_temp = d1;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss);
+
+ if (arg_count() > 0) {
+ __ ldr(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+ __ sub(temp1, temp1, Operand(kHeapObjectTag));
+ __ vldr(double_temp, temp1, HeapNumber::kValueOffset);
+
+ // Only -0 floors to -0.
+ __ VmovHigh(temp1, double_temp);
+ __ cmp(temp1, Operand(0x80000000));
+ __ b(ne, &tail);
+ __ VmovLow(temp1, double_temp);
+ __ cmp(temp1, Operand(0));
+ __ b(ne, &tail);
+
+ __ mov(temp1, Operand(slot, LSL, 1));
+ __ add(temp1, temp1, vector);
+ __ Move(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ __ str(temp2,
+ FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+ }
+
+ __ bind(&tail);
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unreachable");
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ b(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+ Register function = r1;
+ Register vector = r2;
+ Register slot = r3;
+
+ Register temp1 = r0;
+ Register temp2 = r4;
+ DwVfpRegister double_temp1 = d1;
+ DwVfpRegister double_temp2 = d2;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss);
+
+ if (arg_count() > 0) {
+ __ ldr(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+ __ sub(temp1, temp1, Operand(kHeapObjectTag));
+ __ vldr(double_temp1, temp1, HeapNumber::kValueOffset);
+
+ // If the number is >0, it doesn't round to -0
+ __ Vmov(double_temp2, 0, temp1);
+ __ VFPCompareAndSetFlags(double_temp1, double_temp2);
+ __ b(gt, &tail);
+
+ // If the number is <=-1, it doesn't round to -0
+ __ Vmov(double_temp2, -1, temp1);
+ __ VFPCompareAndSetFlags(double_temp1, double_temp2);
+ __ b(le, &tail);
+
+ // +0 doesn't round to -0.
+ __ VmovHigh(temp1, double_temp1);
+ __ cmp(temp1, Operand(0x80000000));
+ __ b(ne, &tail);
+
+ __ mov(temp1, Operand(slot, LSL, 1));
+ __ add(temp1, temp1, vector);
+ __ Move(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ __ str(temp2,
+ FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+ }
+
+ __ bind(&tail);
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unreachable");
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ b(&tail);
+}
+
+
void CallICStub::Generate(MacroAssembler* masm) {
// r1 - function
// r3 - slot id (Smi)
__ cmp(r1, r4);
__ b(eq, &miss);
+  // Some builtin functions require special handling; miss to the runtime.
+ __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
+ __ cmp(r0, Operand(Smi::FromInt(0)));
+ __ b(ne, &miss);
+
// Update stats.
__ ldr(r4, FieldMemOperand(r2, with_types_offset));
__ add(r4, r4, Operand(Smi::FromInt(1)));
}
+void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, r2);
+ CallIC_RoundStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, r2);
+ CallIC_FloorStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, r2);
+ CallIC_CeilStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void VectorRawLoadStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
}
+void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp,
+ BuiltinFunctionId id, Label* miss) {
+ ldr(temp, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ ldr(temp, FieldMemOperand(temp, SharedFunctionInfo::kFunctionDataOffset));
+ cmp(temp, Operand(Smi::FromInt(id)));
+ b(ne, miss);
+}
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void BranchIfNotBuiltin(Register function, Register temp,
+ BuiltinFunctionId id, Label* miss);
+
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
+void CallIC_RoundStub::Generate(MacroAssembler* masm) {
+ Register function = x1;
+ Register vector = x2;
+ Register slot = x3;
+
+ Register temp1 = x0;
+ Register temp2 = x4;
+ DoubleRegister double_temp1 = d1;
+ DoubleRegister double_temp2 = d2;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss);
+
+ if (arg_count() > 0) {
+ __ Ldr(temp1, MemOperand(jssp, (arg_count() - 1) * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+ __ Sub(temp1, temp1, Operand(kHeapObjectTag));
+ __ Ldr(double_temp1, MemOperand(temp1, HeapNumber::kValueOffset));
+
+ // If the number is >0, it doesn't round to -0
+ __ Fmov(double_temp2, 0);
+ __ Fcmp(double_temp1, double_temp2);
+ __ B(gt, &tail);
+
+ // If the number is <-.5, it doesn't round to -0
+ __ Fmov(double_temp2, -.5);
+ __ Fcmp(double_temp1, double_temp2);
+ __ B(lt, &tail);
+
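+  // +0 doesn't round to -0.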
+ __ Fmov(temp1, double_temp1);
+ __ Cmp(temp1, Operand(0x8000000000000000));
+ __ B(ne, &tail);
+
+ __ SmiUntag(temp1, slot);
+ __ Mov(temp1, Operand(temp1, LSL, kPointerSizeLog2));
+ __ Add(temp1, temp1, vector);
+ __ Mov(temp2, Smi::FromInt(kHasReturnedMinusZeroSentinel));
+ __ Str(temp2,
+ FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+ }
+
+  __ Bind(&tail);
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ __ Unreachable();
+
+  __ Bind(&miss);
+  GenerateMiss(masm);
+  __ B(&tail);
+}
+
+
+void CallIC_FloorStub::Generate(MacroAssembler* masm) {
+ Register function = x1;
+ Register vector = x2;
+ Register slot = x3;
+
+ Register temp1 = x0;
+ Register temp2 = x4;
+ DoubleRegister double_temp = d1;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss);
+
+ if (arg_count() > 0) {
+ __ Ldr(temp1, MemOperand(jssp, (arg_count() - 1) * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+ __ Sub(temp1, temp1, Operand(kHeapObjectTag));
+ __ Ldr(double_temp, MemOperand(temp1, HeapNumber::kValueOffset));
+
+ // Only -0 floors to -0.
+ __ Fmov(temp1, double_temp);
+ __ Cmp(temp1, Operand(0x8000000000000000));
+ __ B(ne, &tail);
+
+ __ SmiUntag(temp1, slot);
+ __ Mov(temp1, Operand(temp1, LSL, kPointerSizeLog2));
+ __ Add(temp1, temp1, vector);
+ __ Mov(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ __ Str(temp2,
+ FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+ }
+
+  __ Bind(&tail);
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ __ Unreachable();
+
+  __ Bind(&miss);
+  GenerateMiss(masm);
+  __ B(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+ Register function = x1;
+ Register vector = x2;
+ Register slot = x3;
+
+ Register temp1 = x0;
+ Register temp2 = x4;
+ DoubleRegister double_temp1 = d1;
+ DoubleRegister double_temp2 = d2;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss);
+
+ if (arg_count() > 0) {
+ __ Ldr(temp1, MemOperand(jssp, (arg_count() - 1) * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+ __ Sub(temp1, temp1, Operand(kHeapObjectTag));
+ __ Ldr(double_temp1, MemOperand(temp1, HeapNumber::kValueOffset));
+
+ // If the number is positive, it doesn't ceil to -0
+ __ Fmov(double_temp2, 0);
+ __ Fcmp(double_temp1, double_temp2);
+ __ B(gt, &tail);
+
+    // If the number is <= -1, it doesn't ceil to -0
+ __ Fmov(double_temp2, -1);
+ __ Fcmp(double_temp1, double_temp2);
+ __ B(le, &tail);
+
+ // +Zero doesn't round to -0
+ __ Fmov(temp1, double_temp1);
+ __ Cmp(temp1, Operand(0x8000000000000000));
+ __ B(ne, &tail);
+
+ __ SmiUntag(temp1, slot);
+ __ Mov(temp1, Operand(temp1, LSL, kPointerSizeLog2));
+ __ Add(temp1, temp1, vector);
+ __ Mov(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ __ Str(temp2,
+ FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+ }
+
+  __ Bind(&tail);
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ __ Unreachable();
+
+  __ Bind(&miss);
+  GenerateMiss(masm);
+  __ B(&tail);
+}
+
+
void CallICStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("CallICStub");
__ Cmp(function, x5);
__ B(eq, &miss);
+  // Some builtin functions require special handling; miss to the runtime.
+ __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x0, FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+ __ Cmp(x0, Operand(Smi::FromInt(0)));
+ __ B(ne, &miss);
+
// Update stats.
__ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
}
+void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, x2);
+ CallIC_RoundStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, x2);
+ CallIC_FloorStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, x2);
+ CallIC_CeilStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void VectorRawLoadStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
}
+void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp,
+ BuiltinFunctionId id, Label* miss) {
+ Ldr(temp, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ Ldr(temp, FieldMemOperand(temp, SharedFunctionInfo::kFunctionDataOffset));
+ Cmp(temp, Operand(Smi::FromInt(id)));
+ B(ne, miss);
+}
+
+
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void BranchIfNotBuiltin(Register function, Register temp,
+ BuiltinFunctionId id, Label* miss);
+
void Jump(Register target);
void Jump(Address target, RelocInfo::Mode rmode);
void Jump(Handle<Code> code, RelocInfo::Mode rmode);
struct DoubleConstant BASE_EMBEDDED {
double min_int;
double one_half;
+double minus_one;
double minus_one_half;
double negative_infinity;
double the_hole_nan;
void ExternalReference::SetUp() {
double_constants.min_int = kMinInt;
double_constants.one_half = 0.5;
+ double_constants.minus_one = -1;
double_constants.minus_one_half = -0.5;
double_constants.the_hole_nan = bit_cast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
}
+ExternalReference ExternalReference::address_of_minus_one() {
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.minus_one));
+}
+
+
ExternalReference ExternalReference::address_of_negative_infinity() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.negative_infinity));
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
static ExternalReference address_of_one_half();
+ static ExternalReference address_of_minus_one();
static ExternalReference address_of_minus_one_half();
static ExternalReference address_of_negative_infinity();
static ExternalReference address_of_the_hole_nan();
bit_field_ = IsUninitializedField::update(bit_field_, b);
}
+ void MarkShouldHandleMinusZeroResult() {
+ bit_field_ = ShouldHandleMinusZeroResultField::update(bit_field_, true);
+ }
+ bool ShouldHandleMinusZeroResult() {
+ return ShouldHandleMinusZeroResultField::decode(bit_field_);
+ }
+
enum CallType {
POSSIBLY_EVAL_CALL,
GLOBAL_CALL,
ic_slot_or_slot_(FeedbackVectorICSlot::Invalid().ToInt()),
expression_(expression),
arguments_(arguments),
- bit_field_(IsUninitializedField::encode(false)) {
+ bit_field_(IsUninitializedField::encode(false) |
+ ShouldHandleMinusZeroResultField::encode(false)) {
if (expression->IsProperty()) {
expression->AsProperty()->mark_for_call();
}
Handle<JSFunction> target_;
Handle<AllocationSite> allocation_site_;
class IsUninitializedField : public BitField8<bool, 0, 1> {};
+ class ShouldHandleMinusZeroResultField : public BitField8<bool, 1, 1> {};
uint8_t bit_field_;
};
}
+void CallIC_RoundStub::PrintState(std::ostream& os) const { // NOLINT
+ os << state() << " (Round)";
+}
+
+
+void CallIC_FloorStub::PrintState(std::ostream& os) const { // NOLINT
+ os << state() << " (Floor)";
+}
+
+
+void CallIC_CeilStub::PrintState(std::ostream& os) const { // NOLINT
+ os << state() << " (Ceil)";
+}
+
+
void CallIC_ArrayStub::PrintState(std::ostream& os) const { // NOLINT
os << state() << " (Array)";
}
V(CallFunction) \
V(CallIC) \
V(CallIC_Array) \
+ V(CallIC_Round) \
+ V(CallIC_Floor) \
+ V(CallIC_Ceil) \
V(CEntry) \
V(CompareIC) \
V(DoubleToI) \
V(LoadICTrampoline) \
V(CallICTrampoline) \
V(CallIC_ArrayTrampoline) \
+ V(CallIC_RoundTrampoline) \
+ V(CallIC_FloorTrampoline) \
+ V(CallIC_CeilTrampoline) \
V(LoadIndexedInterceptor) \
V(LoadIndexedString) \
V(MathPow) \
return static_cast<ExtraICState>(minor_key_);
}
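+  // Stored in the call IC's extra feedback slot once the builtin target has
+  // returned -0.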
+ static const int kHasReturnedMinusZeroSentinel = 1;
+
protected:
bool CallAsMethod() const {
return state().call_type() == CallICState::METHOD;
};
+class CallIC_RoundStub : public CallICStub {
+ public:
+ CallIC_RoundStub(Isolate* isolate, const CallICState& state_in)
+ : CallICStub(isolate, state_in) {}
+
+ InlineCacheState GetICState() const final { return MONOMORPHIC; }
+
+ private:
+ void PrintState(std::ostream& os) const override; // NOLINT
+
+ DEFINE_PLATFORM_CODE_STUB(CallIC_Round, CallICStub);
+};
+
+
+class CallIC_FloorStub : public CallICStub {
+ public:
+ CallIC_FloorStub(Isolate* isolate, const CallICState& state_in)
+ : CallICStub(isolate, state_in) {}
+
+ InlineCacheState GetICState() const final { return MONOMORPHIC; }
+
+ private:
+ void PrintState(std::ostream& os) const override; // NOLINT
+
+ DEFINE_PLATFORM_CODE_STUB(CallIC_Floor, CallICStub);
+};
+
+
+class CallIC_CeilStub : public CallICStub {
+ public:
+ CallIC_CeilStub(Isolate* isolate, const CallICState& state_in)
+ : CallICStub(isolate, state_in) {}
+
+ InlineCacheState GetICState() const final { return MONOMORPHIC; }
+
+ private:
+ void PrintState(std::ostream& os) const override; // NOLINT
+
+ DEFINE_PLATFORM_CODE_STUB(CallIC_Ceil, CallICStub);
+};
+
+
class CallIC_ArrayStub: public CallICStub {
public:
CallIC_ArrayStub(Isolate* isolate, const CallICState& state_in)
};
+class CallIC_RoundTrampolineStub : public CallICTrampolineStub {
+ public:
+ CallIC_RoundTrampolineStub(Isolate* isolate, const CallICState& state)
+ : CallICTrampolineStub(isolate, state) {}
+
+ private:
+ DEFINE_PLATFORM_CODE_STUB(CallIC_RoundTrampoline, CallICTrampolineStub);
+};
+
+
+class CallIC_FloorTrampolineStub : public CallICTrampolineStub {
+ public:
+ CallIC_FloorTrampolineStub(Isolate* isolate, const CallICState& state)
+ : CallICTrampolineStub(isolate, state) {}
+
+ private:
+ DEFINE_PLATFORM_CODE_STUB(CallIC_FloorTrampoline, CallICTrampolineStub);
+};
+
+
+class CallIC_CeilTrampolineStub : public CallICTrampolineStub {
+ public:
+ CallIC_CeilTrampolineStub(Isolate* isolate, const CallICState& state)
+ : CallICTrampolineStub(isolate, state) {}
+
+ private:
+ DEFINE_PLATFORM_CODE_STUB(CallIC_CeilTrampoline, CallICTrampolineStub);
+};
+
+
class MegamorphicLoadStub : public HydrogenCodeStub {
public:
MegamorphicLoadStub(Isolate* isolate, const LoadICState& state)
if (!FLAG_fast_math) break;
// Fall through if FLAG_fast_math.
case kMathRound:
- case kMathFround:
case kMathFloor:
+      // If round/floor has seen minus zero, don't inline, since that assumes
+      // the returned value is an integer, which -0 definitely is not.
+ if (expr->ShouldHandleMinusZeroResult()) {
+ break;
+ }
+ case kMathFround:
case kMathAbs:
case kMathSqrt:
case kMathLog:
return true;
}
break;
+    case kMathCeil:
+      // If ceil has seen minus zero, don't inline, since that assumes the
+      // returned value is an integer, which -0 definitely is not.
+ if (expr->ShouldHandleMinusZeroResult()) {
+ break;
+ }
+ if (expr->arguments()->length() == 1) {
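+        // Inline Math.ceil(x) as -floor(-x).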
+ HValue* argument = Pop();
+ Drop(2); // Receiver and function.
+ HValue* op = NULL;
+ {
+ NoObservableSideEffectsScope s(this);
+ HValue* neg_arg =
+ AddUncasted<HMul>(graph()->GetConstantMinus1(), argument);
+ op = AddUncasted<HUnaryMathOperation>(neg_arg, kMathFloor);
+ }
+ HInstruction* neg_op =
+ NewUncasted<HMul>(graph()->GetConstantMinus1(), op);
+ ast_context()->ReturnInstruction(neg_op, expr->id());
+ return true;
+ }
+ break;
case kMathImul:
if (expr->arguments()->length() == 2) {
HValue* right = Pop();
if (!FLAG_fast_math) break;
// Fall through if FLAG_fast_math.
case kMathRound:
- case kMathFround:
case kMathFloor:
+      // If round/floor has seen minus zero, don't inline, since that assumes
+      // the returned value is an integer, which -0 definitely is not.
+ if (expr->ShouldHandleMinusZeroResult()) {
+ break;
+ }
+ case kMathFround:
case kMathAbs:
case kMathSqrt:
case kMathLog:
return true;
}
break;
+ case kMathCeil:
+      // If ceil has seen minus zero, don't inline, since that assumes the
+      // returned value is an integer, which -0 definitely is not.
+ if (expr->ShouldHandleMinusZeroResult()) {
+ break;
+ }
+ if (argument_count == 2) {
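+        // Inline Math.ceil(x) as -floor(-x).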
+ HValue* argument = Pop();
+ Drop(2); // Receiver and function.
+ HValue* op = NULL;
+ {
+ NoObservableSideEffectsScope s(this);
+ HValue* neg_arg =
+ AddUncasted<HMul>(graph()->GetConstantMinus1(), argument);
+ op = AddUncasted<HUnaryMathOperation>(neg_arg, kMathFloor);
+ }
+ HInstruction* neg_op =
+ NewUncasted<HMul>(graph()->GetConstantMinus1(), op);
+ ast_context()->ReturnInstruction(neg_op, expr->id());
+ return true;
+ }
+ break;
case kMathPow:
if (argument_count == 3) {
HValue* right = Pop();
}
-void HOptimizedGraphBuilder::GenerateMathFloor(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathFloor);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
F(DoubleHi) \
F(DoubleLo) \
F(MathClz32) \
- F(MathFloor) \
F(MathSqrt) \
F(MathLogRT) \
/* ES6 Collections */ \
}
+void CallIC_RoundStub::Generate(MacroAssembler* masm) {
+ Register function = edi;
+ Register vector = ebx;
+ Register slot = edx;
+
+ Register temp = eax;
+ XMMRegister xmm_temp1 = xmm0;
+ XMMRegister xmm_temp2 = xmm1;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp, kMathRound, &miss);
+
+ if (arg_count() > 0) {
+ ExternalReference minus_one_half =
+ ExternalReference::address_of_minus_one_half();
+
+ __ mov(temp, Operand(esp, arg_count() * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp, map, &tail, DO_SMI_CHECK);
+
+    __ movsd(xmm_temp1, FieldOperand(temp, HeapNumber::kValueOffset));
+
+ // If the number is >0, it doesn't round to -0
+ __ xorps(xmm_temp2, xmm_temp2);
+ __ ucomisd(xmm_temp1, xmm_temp2);
+ __ j(above, &tail, Label::kNear);
+
+ // If the number is <-.5, it doesn't round to -0
+ __ movsd(xmm_temp2, Operand::StaticVariable(minus_one_half));
+ __ ucomisd(xmm_temp1, xmm_temp2);
+ __ j(below, &tail, Label::kNear);
+
+    // The only non-negative value remaining is +0, which doesn't round to -0.
+ __ movmskpd(temp, xmm_temp1);
+ __ test(temp, Immediate(1));
+ __ j(zero, &tail, Label::kNear);
+
+ __ mov(FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ }
+
+ __ bind(&tail);
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ int3();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ jmp(&tail);
+}
+
+
+void CallIC_FloorStub::Generate(MacroAssembler* masm) {
+ Register function = edi;
+ Register vector = ebx;
+ Register slot = edx;
+
+ Register temp = eax;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp, kMathFloor, &miss);
+
+ if (arg_count() > 0) {
+ __ mov(temp, Operand(esp, arg_count() * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp, map, &tail, DO_SMI_CHECK);
+
+ // The only number that floors to -0 is -0.
+ __ cmp(FieldOperand(temp, HeapNumber::kExponentOffset),
+ Immediate(0x80000000));
+ __ j(not_equal, &tail);
+
+ __ cmp(FieldOperand(temp, HeapNumber::kMantissaOffset), Immediate(0));
+ __ j(not_equal, &tail);
+
+ __ mov(FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ }
+
+ __ bind(&tail);
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ int3();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ jmp(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+ Register function = edi;
+ Register vector = ebx;
+ Register slot = edx;
+
+ Register temp = eax;
+ XMMRegister xmm_temp1 = xmm0;
+ XMMRegister xmm_temp2 = xmm1;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp, kMathCeil, &miss);
+
+ if (arg_count() > 0) {
+ ExternalReference minus_one = ExternalReference::address_of_minus_one();
+
+ __ mov(temp, Operand(esp, arg_count() * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp, map, &tail, DO_SMI_CHECK);
+
+    __ movsd(xmm_temp1, FieldOperand(temp, HeapNumber::kValueOffset));
+
+ // If the number is >0, it doesn't round to -0
+ __ xorps(xmm_temp2, xmm_temp2);
+ __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(above, &tail, Label::kNear);
+
+ // If the number is <=-1, it doesn't round to -0
+ __ movsd(xmm_temp2, Operand::StaticVariable(minus_one));
+ __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(below_equal, &tail, Label::kNear);
+
+    // The only non-negative value remaining is +0, which doesn't round to -0.
+ __ movmskpd(temp, xmm_temp1);
+ __ test(temp, Immediate(1));
+ __ j(zero, &tail, Label::kNear);
+
+ __ mov(FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ }
+
+ __ bind(&tail);
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ int3();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ jmp(&tail);
+}
+
+
void CallICStub::Generate(MacroAssembler* masm) {
// edi - function
// edx - slot id
__ cmp(edi, ecx);
__ j(equal, &miss);
+  // Make sure that the function is not Math.floor, Math.round, or Math.ceil,
+  // which have special CallICs to handle -0.0.
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(0)));
+ __ j(not_equal, &miss);
+
// Update stats.
__ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
}
+void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, ebx);
+ CallIC_RoundStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, ebx);
+ CallIC_FloorStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, ebx);
+ CallIC_CeilStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
}
+void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp,
+ BuiltinFunctionId id, Label* miss) {
+ mov(temp, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ mov(temp, FieldOperand(temp, SharedFunctionInfo::kFunctionDataOffset));
+ cmp(temp, Immediate(Smi::FromInt(id)));
+ j(not_equal, miss);
+}
+
+
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
// Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ void BranchIfNotBuiltin(Register function, Register temp,
+ BuiltinFunctionId id, Label* miss);
+
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
// Are we the array function?
Handle<JSFunction> array_function =
Handle<JSFunction>(isolate()->native_context()->array_function());
+ CallICNexus* nexus = casted_nexus<CallICNexus>();
if (array_function.is_identical_to(Handle<JSFunction>::cast(function))) {
// Alter the slot.
- CallICNexus* nexus = casted_nexus<CallICNexus>();
nexus->ConfigureMonomorphicArray();
// Vector-based ICs have a different calling convention in optimized code
OnTypeFeedbackChanged(isolate(), get_host(), nexus->vector(), state(),
MONOMORPHIC);
return true;
+ } else {
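+    // The callee may be a Math builtin whose -0 results need tracking; if so,
+    // install a specialized CallIC for it.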
+ Handle<JSFunction> maybe_builtin(Handle<JSFunction>::cast(function));
+ if (maybe_builtin->shared()->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = maybe_builtin->shared()->builtin_function_id();
+ switch (id) {
+ case kMathRound: {
+ nexus->ConfigureMonomorphicMathFunction(maybe_builtin);
+ if (AddressIsOptimizedCode()) {
+ CallIC_RoundStub stub(isolate(), callic_state);
+ set_target(*stub.GetCode());
+ } else {
+ CallIC_RoundTrampolineStub stub(isolate(), callic_state);
+ set_target(*stub.GetCode());
+ }
+ return true;
+ }
+ case kMathFloor:
+ nexus->ConfigureMonomorphicMathFunction(maybe_builtin);
+ if (AddressIsOptimizedCode()) {
+ CallIC_FloorStub stub(isolate(), callic_state);
+ set_target(*stub.GetCode());
+ } else {
+ CallIC_FloorTrampolineStub stub(isolate(), callic_state);
+ set_target(*stub.GetCode());
+ }
+ return true;
+ case kMathCeil:
+ nexus->ConfigureMonomorphicMathFunction(maybe_builtin);
+ if (AddressIsOptimizedCode()) {
+ CallIC_CeilStub stub(isolate(), callic_state);
+ set_target(*stub.GetCode());
+ } else {
+ CallIC_CeilTrampolineStub stub(isolate(), callic_state);
+ set_target(*stub.GetCode());
+ }
+ return true;
+ default:
+ break;
+ }
+ }
}
return false;
}
// ECMA 262 - 15.8.2.6
function MathCeil(x) {
- return -%_MathFloor(-x);
+ return -MathFloorJS(-x);
}
// ECMA 262 - 15.8.2.8
// ECMA 262 - 15.8.2.9
function MathFloorJS(x) {
- return %_MathFloor(+x);
+ return %MathFloor(+x);
}
// ECMA 262 - 15.8.2.10
// ES6 draft 09-27-13, section 20.2.2.34.
function MathTrunc(x) {
x = +x;
- if (x > 0) return %_MathFloor(x);
- if (x < 0) return -%_MathFloor(-x);
+ if (x > 0) return MathFloorJS(x);
+ if (x < 0) return -MathFloorJS(-x);
// -0, 0 or NaN.
return x;
}
}
+void CallIC_RoundStub::Generate(MacroAssembler* masm) {
+ Register function = a1;
+ Register vector = a2;
+ Register slot = a3;
+
+ Register temp1 = a0;
+ Register temp2 = t0;
+ DoubleRegister double_temp1 = f12;
+ DoubleRegister double_temp2 = f14;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss);
+
+ if (arg_count() > 0) {
+ __ lw(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+ __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset));
+
+ // If the number is >0, it doesn't round to -0
+ __ Move(double_temp2, 0.0);
+ __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2);
+
+ // If the number is <-.5, it doesn't round to -0
+ __ Move(double_temp2, -.5);
+ __ BranchF64(&tail, nullptr, lt, double_temp1, double_temp2);
+
+ // +0 doesn't round to -0
+ __ FmoveHigh(temp1, double_temp1);
+ __ Branch(&tail, ne, temp1, Operand(0x80000000));
+
+ __ sll(temp1, slot, 1);
+ __ Addu(temp1, temp1, vector);
+ __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ __ sw(temp2,
+ FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+ }
+
+ __ bind(&tail);
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unreachable");
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ Branch(&tail);
+}
+
+
+void CallIC_FloorStub::Generate(MacroAssembler* masm) {
+ Register function = a1;
+ Register vector = a2;
+ Register slot = a3;
+
+ Register temp1 = a0;
+ Register temp2 = t0;
+ DoubleRegister double_temp = f12;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss);
+
+ if (arg_count() > 0) {
+ __ lw(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+ __ ldc1(double_temp, FieldMemOperand(temp1, HeapNumber::kValueOffset));
+
+ // Only -0 floors to -0.
+ __ FmoveHigh(temp1, double_temp);
+ __ Branch(&tail, ne, temp1, Operand(0x80000000));
+ __ FmoveLow(temp1, double_temp);
+ __ Branch(&tail, ne, temp1, Operand(zero_reg));
+
+ __ sll(temp1, slot, 1);
+ __ Addu(temp1, temp1, vector);
+ __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ __ sw(temp2,
+ FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+ }
+
+ __ bind(&tail);
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unreachable");
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ Branch(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+ Register function = a1;
+ Register vector = a2;
+ Register slot = a3;
+
+ Register temp1 = a0;
+ Register temp2 = t0;
+ DoubleRegister double_temp1 = f12;
+ DoubleRegister double_temp2 = f14;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss);
+
+ if (arg_count() > 0) {
+ __ lw(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+ __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset));
+
+ // If the number is >0, it doesn't round to -0
+ __ Move(double_temp2, 0.0);
+ __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2);
+
+ // If the number is <=-1, it doesn't round to -0
+ __ Move(double_temp2, -1.0);
+ __ BranchF64(&tail, nullptr, le, double_temp1, double_temp2);
+
+ // +0 doesn't round to -0.
+ __ FmoveHigh(temp1, double_temp1);
+ __ Branch(&tail, ne, temp1, Operand(0x80000000));
+
+ __ sll(temp1, slot, 1);
+ __ Addu(temp1, temp1, vector);
+ __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ __ sw(temp2,
+ FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+ }
+
+ __ bind(&tail);
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unreachable");
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ Branch(&tail);
+}
+
+
void CallICStub::Generate(MacroAssembler* masm) {
// a1 - function
// a3 - slot id (Smi)
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
__ Branch(&miss, eq, a1, Operand(t0));
+  // Some builtin functions require special handling; miss to the runtime.
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+ __ Branch(&miss, ne, t0, Operand(Smi::FromInt(0)));
+
// Update stats.
__ lw(t0, FieldMemOperand(a2, with_types_offset));
__ Addu(t0, t0, Operand(Smi::FromInt(1)));
}
+void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, a2);
+ CallIC_RoundStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, a2);
+ CallIC_FloorStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, a2);
+ CallIC_CeilStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void VectorRawLoadStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
}
+void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp,
+ BuiltinFunctionId id, Label* miss) {
+ DCHECK(!temp.is(at));
+ lw(temp, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ lw(temp, FieldMemOperand(temp, SharedFunctionInfo::kFunctionDataOffset));
+ Branch(miss, ne, temp, Operand(Smi::FromInt(id)));
+}
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void BranchIfNotBuiltin(Register function, Register temp,
+ BuiltinFunctionId id, Label* miss);
+
struct Unresolved {
int pc;
uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
}
+void CallIC_RoundStub::Generate(MacroAssembler* masm) {
+ Register function = a1;
+ Register vector = a2;
+ Register slot = a3;
+
+ Register temp1 = a0;
+ Register temp2 = a4;
+ DoubleRegister double_temp1 = f12;
+ DoubleRegister double_temp2 = f14;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss);
+
+ if (arg_count() > 0) {
+ __ ld(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+ __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset));
+
+ // If the number is >0, it doesn't round to -0
+ __ Move(double_temp2, 0.0);
+ __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2);
+
+ // If the number is <-.5, it doesn't round to -0
+ __ Move(double_temp2, -.5);
+ __ BranchF64(&tail, nullptr, lt, double_temp1, double_temp2);
+
+ // +0 doesn't round to -0
+ __ FmoveHigh(temp1, double_temp1);
+ __ Branch(&tail, ne, temp1, Operand(0xffffffff80000000));
+
+ __ SmiScale(temp1, slot, kPointerSizeLog2);
+ __ Daddu(temp1, temp1, vector);
+ __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ __ sd(temp2,
+ FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+ }
+
+ __ bind(&tail);
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unreachable");
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ Branch(&tail);
+}
+
+
+void CallIC_FloorStub::Generate(MacroAssembler* masm) {
+ Register function = a1;
+ Register vector = a2;
+ Register slot = a3;
+
+ Register temp1 = a0;
+ Register temp2 = a4;
+ DoubleRegister double_temp = f12;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss);
+
+ if (arg_count() > 0) {
+ __ ld(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+ __ ldc1(double_temp, FieldMemOperand(temp1, HeapNumber::kValueOffset));
+
+ // Only -0 floors to -0.
+ __ FmoveHigh(temp1, double_temp);
+ __ Branch(&tail, ne, temp1, Operand(0xffffffff80000000));
+ __ FmoveLow(temp1, double_temp);
+ __ Branch(&tail, ne, temp1, Operand(zero_reg));
+
+ __ SmiScale(temp1, slot, kPointerSizeLog2);
+ __ Daddu(temp1, temp1, vector);
+ __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ __ sd(temp2,
+ FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+ }
+
+ __ bind(&tail);
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unreachable");
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ Branch(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+ Register function = a1;
+ Register vector = a2;
+ Register slot = a3;
+
+ Register temp1 = a0;
+ Register temp2 = a4;
+ DoubleRegister double_temp1 = f12;
+ DoubleRegister double_temp2 = f14;
+ Label tail, miss;
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss);
+
+ if (arg_count() > 0) {
+ __ ld(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
+ __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset));
+
+ // If the number is >0, it doesn't round to -0
+ __ Move(double_temp2, 0.0);
+ __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2);
+
+ // If the number is <=-1, it doesn't round to -0
+ __ Move(double_temp2, -1.0);
+ __ BranchF64(&tail, nullptr, le, double_temp1, double_temp2);
+
+ // +0 doesn't round to -0.
+ __ FmoveHigh(temp1, double_temp1);
+ __ Branch(&tail, ne, temp1, Operand(0xffffffff80000000));
+
+ __ SmiScale(temp1, slot, kPointerSizeLog2);
+ __ Daddu(temp1, temp1, vector);
+ __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
+ __ sd(temp2,
+ FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
+ }
+
+ __ bind(&tail);
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unreachable");
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ Branch(&tail);
+}
+
+
void CallICStub::Generate(MacroAssembler* masm) {
// a1 - function
// a3 - slot id (Smi)
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
__ Branch(&miss, eq, a1, Operand(a4));
+  // Some builtin functions require special handling; miss to the runtime.
+ __ ld(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+ __ Branch(&miss, ne, t0, Operand(Smi::FromInt(0)));
+
// Update stats.
__ ld(a4, FieldMemOperand(a2, with_types_offset));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
}
+void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, a2);
+ CallIC_RoundStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, a2);
+ CallIC_FloorStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, a2);
+ CallIC_CeilStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void VectorRawLoadStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
}
+void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp,
+ BuiltinFunctionId id, Label* miss) {
+ ld(temp, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ ld(temp, FieldMemOperand(temp, SharedFunctionInfo::kFunctionDataOffset));
+ Branch(miss, ne, temp, Operand(Smi::FromInt(id)));
+}
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void BranchIfNotBuiltin(Register function, Register temp,
+ BuiltinFunctionId id, Label* miss);
+
struct Unresolved {
int pc;
uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
"Logger::LeaveExternal");
Add(ExternalReference::address_of_minus_one_half().address(),
"double_constants.minus_one_half");
+ Add(ExternalReference::address_of_minus_one().address(),
+ "double_constants.minus_one");
Add(ExternalReference::stress_deopt_count(isolate).address(),
"Isolate::stress_deopt_count_address()");
#include "src/v8.h"
+#include "src/code-stubs.h"
#include "src/ic/ic.h"
#include "src/ic/ic-state.h"
#include "src/objects.h"
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
DCHECK(!FLAG_vector_ics ||
- GetFeedbackExtra() == *vector()->UninitializedSentinel(isolate));
+ GetFeedbackExtra() == *vector()->UninitializedSentinel(isolate) ||
+ GetFeedbackExtra() ==
+ Smi::FromInt(CallICStub::kHasReturnedMinusZeroSentinel));
if (feedback == *vector()->MegamorphicSentinel(isolate)) {
return GENERIC;
}
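+// The extra feedback slot is left uninitialized so the specialized CallIC
+// stubs can record the -0 sentinel there later.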
+void CallICNexus::ConfigureMonomorphicMathFunction(
+ Handle<JSFunction> function) {
+ Handle<WeakCell> new_cell = GetIsolate()->factory()->NewWeakCell(function);
+ SetFeedback(*new_cell);
+ SetFeedbackExtra(*vector()->UninitializedSentinel(GetIsolate()));
+}
+
+
void CallICNexus::ConfigureUninitialized() {
SetFeedback(*vector()->UninitializedSentinel(GetIsolate()),
SKIP_WRITE_BARRIER);
void ConfigureUninitialized();
void ConfigureGeneric();
void ConfigureMonomorphicArray();
+ void ConfigureMonomorphicMathFunction(Handle<JSFunction> function);
void ConfigureMonomorphic(Handle<JSFunction> function);
InlineCacheState StateFromFeedback() const override;
}
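+// Returns true if the call target is Math.round, Math.floor or Math.ceil and
+// the CallIC has recorded a -0 result for it.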
+bool TypeFeedbackOracle::CallIsBuiltinWithMinusZeroResult(
+ FeedbackVectorICSlot slot) {
+ Handle<Object> value = GetInfo(slot);
+ if (!value->IsJSFunction()) return false;
+  Handle<JSFunction> builtin(Handle<JSFunction>::cast(value));
+  if (!builtin->shared()->HasBuiltinFunctionId()) return false;
+  if (builtin->shared()->builtin_function_id() != kMathRound &&
+      builtin->shared()->builtin_function_id() != kMathFloor &&
+      builtin->shared()->builtin_function_id() != kMathCeil) {
+ return false;
+ }
+ return feedback_vector_->get(feedback_vector_->GetIndex(slot) + 1) ==
+ Smi::FromInt(CallICStub::kHasReturnedMinusZeroSentinel);
+}
+
+
bool TypeFeedbackOracle::CallNewIsMonomorphic(FeedbackVectorSlot slot) {
Handle<Object> info = GetInfo(slot);
return FLAG_pretenuring_call_new
bool StoreIsUninitialized(TypeFeedbackId id);
bool CallIsUninitialized(FeedbackVectorICSlot slot);
bool CallIsMonomorphic(FeedbackVectorICSlot slot);
+ bool CallIsBuiltinWithMinusZeroResult(FeedbackVectorICSlot slot);
bool KeyedArrayCallIsHoley(TypeFeedbackId id);
bool CallNewIsMonomorphic(FeedbackVectorSlot slot);
if (expr->IsUsingCallFeedbackICSlot(isolate())) {
FeedbackVectorICSlot slot = expr->CallFeedbackICSlot();
is_uninitialized = oracle()->CallIsUninitialized(slot);
- if (!expr->expression()->IsProperty() &&
- oracle()->CallIsMonomorphic(slot)) {
- expr->set_target(oracle()->GetCallTarget(slot));
- Handle<AllocationSite> site = oracle()->GetCallAllocationSite(slot);
- expr->set_allocation_site(site);
+ if (oracle()->CallIsMonomorphic(slot)) {
+ if (oracle()->CallIsBuiltinWithMinusZeroResult(slot)) {
+ expr->MarkShouldHandleMinusZeroResult();
+ }
+ if (!expr->expression()->IsProperty()) {
+ expr->set_target(oracle()->GetCallTarget(slot));
+ Handle<AllocationSite> site = oracle()->GetCallAllocationSite(slot);
+ expr->set_allocation_site(site);
+ }
}
}
}
+void CallIC_RoundStub::Generate(MacroAssembler* masm) {
+ Register function = rdi;
+ Register vector = rbx;
+ Register slot = rdx;
+
+ Register temp = rax;
+ XMMRegister xmm_temp1 = xmm1;
+ XMMRegister xmm_temp2 = xmm0;
+ Label tail, miss;
+
+ __ SmiToInteger64(slot, slot);
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp, kMathRound, &miss);
+
+ if (arg_count() > 0) {
+ __ movp(temp, Operand(rsp, arg_count() * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp, map, &tail, DO_SMI_CHECK);
+
+ __ movsd(xmm_temp1, FieldOperand(temp, HeapNumber::kValueOffset));
+
+ // If the number is >0, it doesn't round to -0
+ __ xorps(xmm_temp2, xmm_temp2);
+ __ ucomisd(xmm_temp1, xmm_temp2);
+ __ j(above, &tail, Label::kNear);
+
+ // If the number is <-.5, it doesn't round to -0
+ static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
+ __ movq(temp, minus_one_half);
+ __ movq(xmm_temp2, temp);
+ __ ucomisd(xmm_temp1, xmm_temp2);
+ __ j(below, &tail, Label::kNear);
+
+ // +0 doesn't round to -0
+ __ movmskpd(temp, xmm_temp1);
+ __ testl(temp, Immediate(1));
+ __ j(zero, &tail, Label::kNear);
+
+ __ Move(FieldOperand(vector, slot, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Smi::FromInt(kHasReturnedMinusZeroSentinel));
+ }
+
+ __ bind(&tail);
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ int3();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ jmp(&tail);
+}
+
+
+void CallIC_FloorStub::Generate(MacroAssembler* masm) {
+ Register function = rdi;
+ Register vector = rbx;
+ Register slot = rdx;
+
+ Register temp1 = rax;
+ Register temp2 = rsi;
+ Label tail, miss;
+
+ __ SmiToInteger64(slot, slot);
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss);
+
+ if (arg_count() > 0) {
+ __ movp(temp1, Operand(rsp, arg_count() * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp1, map, &tail, DO_SMI_CHECK);
+
+ // Only -0 floors to -0.
+ __ movq(temp1, FieldOperand(temp1, HeapNumber::kValueOffset));
+ static int64_t minus_zero = V8_INT64_C(0x8000000000000000); // -0.0
+ __ movq(temp2, minus_zero);
+ __ cmpq(temp1, temp2);
+ __ j(not_equal, &tail);
+
+ __ Move(FieldOperand(vector, slot, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Smi::FromInt(kHasReturnedMinusZeroSentinel));
+ }
+
+ __ bind(&tail);
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ int3();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ jmp(&tail);
+}
+
+
+void CallIC_CeilStub::Generate(MacroAssembler* masm) {
+ Register function = rdi;
+ Register vector = rbx;
+ Register slot = rdx;
+
+ Register temp = rax;
+ XMMRegister xmm_temp1 = xmm1;
+ XMMRegister xmm_temp2 = xmm0;
+ Label tail, miss;
+
+ __ SmiToInteger64(slot, slot);
+
+ // Ensure nobody has snuck in another function.
+ __ BranchIfNotBuiltin(function, temp, kMathCeil, &miss);
+
+ if (arg_count() > 0) {
+ __ movp(temp, Operand(rsp, arg_count() * kPointerSize));
+ Handle<Map> map = isolate()->factory()->heap_number_map();
+ __ CheckMap(temp, map, &tail, DO_SMI_CHECK);
+
+    __ movsd(xmm_temp1, FieldOperand(temp, HeapNumber::kValueOffset));
+
+ // If the number is >0, it doesn't round to -0
+ __ xorps(xmm_temp2, xmm_temp2);
+ __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(above, &tail, Label::kNear);
+
+ // If the number is <=-1, it doesn't round to -0
+ static int64_t minus_one = V8_INT64_C(0xbff0000000000000); // -1
+ __ movq(temp, minus_one);
+ __ movq(xmm_temp2, temp);
+ __ ucomisd(xmm_temp1, xmm_temp2);
+    __ j(below_equal, &tail, Label::kNear);
+
+ // +0 doesn't round to -0.
+ __ movmskpd(temp, xmm_temp1);
+ __ testq(temp, Immediate(1));
+ __ j(zero, &tail, Label::kNear);
+
+ __ Move(FieldOperand(vector, slot, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Smi::FromInt(kHasReturnedMinusZeroSentinel));
+ }
+
+ __ bind(&tail);
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ int3();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+ __ jmp(&tail);
+}
+
+
void CallICStub::Generate(MacroAssembler* masm) {
// rdi - function
// rdx - slot id
__ cmpp(rdi, rcx);
__ j(equal, &miss);
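+  // Some builtin functions require special handling; miss to the runtime.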
+ __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rax, FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
+ __ Cmp(rax, Smi::FromInt(0));
+ __ j(not_equal, &miss);
+
// Update stats.
__ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(1));
}
+void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, rbx);
+ CallIC_RoundStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, rbx);
+ CallIC_FloorStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, rbx);
+ CallIC_CeilStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
}
+void MacroAssembler::BranchIfNotBuiltin(Register function, Register temp,
+ BuiltinFunctionId id, Label* miss) {
+ movp(temp, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ movp(temp, FieldOperand(temp, SharedFunctionInfo::kFunctionDataOffset));
+ Cmp(temp, Smi::FromInt(id));
+ j(not_equal, miss);
+}
+
+
#define REG(Name) { kRegister_ ## Name ## _Code }
static const Register saved_regs[] = {
// Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ void BranchIfNotBuiltin(Register function, Register temp,
+ BuiltinFunctionId id, Label* miss);
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+// Test that a -0 result doesn't cause deopt loops
+function noDeoptLoop(x) {
+ return Math.ceil(x);
+}
+assertEquals(1, noDeoptLoop(0.4));
+assertEquals(1, noDeoptLoop(0.4));
+assertEquals(1, noDeoptLoop(0.4));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(1, noDeoptLoop(0.4));
+assertEquals(1, noDeoptLoop(0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertUnoptimized(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertEquals(-1.0, 1/noDeoptLoop(-1.0));
+assertEquals(Infinity, 1/noDeoptLoop(0));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.1));
+assertOptimized(noDeoptLoop);
+%ClearFunctionTypeFeedback(noDeoptLoop);
+%DeoptimizeFunction(noDeoptLoop);
+
+// Test that ceil that goes megamorphic is handled correctly.
+function notCeil(x) {
+ return -x;
+}
+function testMega(f, x) {
+ return f(x);
+}
+assertEquals(8, testMega(Math.ceil, 7.4));
+assertEquals(8, testMega(Math.ceil, 7.4));
+assertEquals(8, testMega(Math.ceil, 7.4));
+assertEquals(-7.4, testMega(notCeil, 7.4));
+
+// Make sure that we can learn about ceil specialization from Crankshaft, which
+// doesn't insert soft deopts for CallICs.
+function crankCeilLearn(x) {
+ return Math.ceil(x);
+}
+%OptimizeFunctionOnNextCall(crankCeilLearn);
+assertEquals(12, crankCeilLearn(11.3));
+assertOptimized(crankCeilLearn);
+assertEquals(-Infinity, 1/crankCeilLearn(-0.0));
+assertOptimized(crankCeilLearn);
+assertEquals(-Infinity, 1/crankCeilLearn(-0.75));
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+// Test that a -0 result doesn't cause deopt loops
+function noDeoptLoop(x) {
+ return Math.floor(x);
+}
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertUnoptimized(noDeoptLoop);
+assertEquals(-1, 1/noDeoptLoop(-1.0));
+assertEquals(-1, 1/noDeoptLoop(-0.9));
+assertEquals(-1, 1/noDeoptLoop(-0.4));
+assertEquals(-1, 1/noDeoptLoop(-0.5));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertOptimized(noDeoptLoop);
+%ClearFunctionTypeFeedback(noDeoptLoop);
+%DeoptimizeFunction(noDeoptLoop);
+
+// Test that floor that goes megamorphic is handled correctly.
+function notFloor(x) {
+ return -x;
+}
+function testMega(f, x) {
+ return f(x);
+}
+assertEquals(7, testMega(Math.floor, 7.4));
+assertEquals(7, testMega(Math.floor, 7.4));
+assertEquals(7, testMega(Math.floor, 7.4));
+assertEquals(-7.4, testMega(notFloor, 7.4));
+
+// Make sure that we can learn about floor specialization from Crankshaft, which
+// doesn't insert soft deopts for CallICs.
+function crankFloorLearn(x) {
+ return Math.floor(x);
+}
+%OptimizeFunctionOnNextCall(crankFloorLearn);
+assertEquals(12, crankFloorLearn(12.3));
+assertOptimized(crankFloorLearn);
+assertEquals(-Infinity, 1/crankFloorLearn(-0.0));
+assertOptimized(crankFloorLearn);
+assertEquals(-Infinity, 1/crankFloorLearn(-0.0));
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+// Test that a -0 result doesn't cause deopt loops
+function noDeoptLoop(x) {
+ return Math.round(x);
+}
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(0, noDeoptLoop(0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.4));
+assertUnoptimized(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.0));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.5));
+assertEquals(-1, noDeoptLoop(-1));
+assertEquals(-1, noDeoptLoop(-0.51));
+%OptimizeFunctionOnNextCall(noDeoptLoop);
+assertEquals(-Infinity, 1/noDeoptLoop(-0.4));
+assertEquals(-Infinity, 1/noDeoptLoop(-0.4));
+assertOptimized(noDeoptLoop);
+%ClearFunctionTypeFeedback(noDeoptLoop);
+%DeoptimizeFunction(noDeoptLoop);
+
+// Test that round that goes megamorphic is handled correctly.
+function notRound(x) {
+ return -x;
+}
+function testMega(f, x) {
+ return f(x);
+}
+assertEquals(7, testMega(Math.round, 7.4));
+assertEquals(7, testMega(Math.round, 7.4));
+assertEquals(7, testMega(Math.round, 7.4));
+assertEquals(-7.4, testMega(notRound, 7.4));
+
+// Make sure that we can learn about round specialization from Crankshaft, which
+// doesn't insert soft deopts for CallICs.
+function crankRoundLearn(x) {
+ return Math.round(x);
+}
+%OptimizeFunctionOnNextCall(crankRoundLearn);
+assertEquals(12, crankRoundLearn(12.3));
+assertOptimized(crankRoundLearn);
+assertEquals(-Infinity, 1/crankRoundLearn(-0.4));
+assertOptimized(crankRoundLearn);
+assertEquals(-Infinity, 1/crankRoundLearn(-0.4));