#undef WRITE_CALL_2_VOID
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
+ // TODO(ulan): This clobbers only a subset of registers depending on the
+ // compiler. Rewrite this in assembly to really clobber all registers.
+ // GCC for ia32 uses the FPU and does not touch XMM registers.
+ return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
+}
+
+
} } // namespace v8::internal
};
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
+
+
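+// In debug mode, deliberately clobber the double registers on entry to
+// every runtime function (see RUNTIME_FUNCTION below) to flush out
+// generated code that assumes they survive the call.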
+#ifdef DEBUG
+#define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
+#else
+#define CLOBBER_DOUBLE_REGISTERS()
+#endif
+
+
#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
Type Name(int args_length, Object** args_object, Isolate* isolate)
#define RUNTIME_FUNCTION(Type, Name) \
static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+ CLOBBER_DOUBLE_REGISTERS(); \
Arguments args(args_length, args_object); \
return __RT_impl_##Name(args, isolate); \
} \
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
}
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
}
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
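+  // Unlike on ia32, no CPU feature check is needed here; the
+  // double-saving variant is used unconditionally.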
+ return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
}
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
}
if (info()->saves_caller_doubles()) {
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ SaveCallerDoubles();
}
// Possibly allocate a local context.
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
}
if (deopt_jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
if (needs_frame.is_bound()) {
__ b(&needs_frame);
__ mov(pc, ip);
}
} else {
+ if (info()->saves_caller_doubles()) {
+ ASSERT(info()->IsStub());
+ RestoreCallerDoubles();
+ }
__ mov(lr, Operand(pc), LeaveCC, al);
__ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
}
}
ASSERT(info()->IsStub() || frame_is_built_);
- if (condition == al && frame_is_built_) {
+ // Go through the jump table if we need to handle a condition, build a
+ // frame, or restore caller doubles.
+ if (condition == al && frame_is_built_ &&
+ !info()->saves_caller_doubles()) {
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
- ASSERT(NeedsEagerFrame());
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ RestoreCallerDoubles();
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ jmp(&receiver_ok);
__ bind(&global_object);
- __ ldr(receiver, GlobalObjectOperand());
+ __ ldr(receiver, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ ldr(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
__ ldr(receiver,
FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
if (!type->prototype()->IsJSObject()) return false;
// Go up the prototype chain, recording where we are currently.
holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
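+    // Migrate the holder off a deprecated map, if necessary, so the map
+    // recorded below is up to date.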
+ JSObject::TryMigrateInstance(holder_);
type = Handle<Map>(holder()->map());
}
}
Code::kNoExtraICState) \
V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyStubFailure(MacroAssembler* masm);
+ static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
static void Generate_FunctionCall(MacroAssembler* masm);
HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
JSArrayBuilder* array_builder, ElementsKind kind) {
+ // Insert a bounds check because the number of arguments might exceed
+ // the kInitialMaxFastElementArray limit. This cannot happen for code
+ // that was parsed directly, but a call via Array.apply(thisArg, [...])
+ // can trigger it.
+ HValue* length = GetArgumentsLength();
+ HConstant* max_alloc_length =
+ Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ HValue* checked_length = Add<HBoundsCheck>(length, max_alloc_length);
+
// We need to fill with the hole if it's a smi array in the multi-argument
// case because we might have to bail out while copying arguments into
// the array because they aren't compatible with a smi array.
// If it's a double array, no problem, and if it's fast then no
// problem either because doubles are boxed.
- HValue* length = GetArgumentsLength();
bool fill_with_hole = IsFastSmiElementsKind(kind);
- HValue* new_object = array_builder->AllocateArray(length,
- length,
+ HValue* new_object = array_builder->AllocateArray(checked_length,
+ checked_length,
fill_with_hole);
HValue* elements = array_builder->GetElementsLocation();
ASSERT(elements != NULL);
context(),
LoopBuilder::kPostIncrement);
HValue* start = graph()->GetConstant0();
- HValue* key = builder.BeginBody(start, length, Token::LT);
+ HValue* key = builder.BeginBody(start, checked_length, Token::LT);
HInstruction* argument_elements = Add<HArgumentsElements>(false);
HInstruction* argument = Add<HAccessArgumentsAt>(
- argument_elements, length, key);
+ argument_elements, checked_length, key);
Add<HStoreKeyed>(elements, key, argument, kind);
builder.EndBody();
// strings over and over again.
var Date_cache = {
// Cached time value.
- time: NAN,
+ time: 0,
// String input for which the cached time is valid.
string: null
};
output_frame->SetPc(reinterpret_cast<intptr_t>(
trampoline->instruction_start()));
output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
- Code* notify_failure =
- isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+ Code* notify_failure = NotifyStubFailureBuiltin();
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(notify_failure->entry()));
}
// at the dynamic alignment state slot inside the frame.
bool HasAlignmentPadding(JSFunction* function);
+ // Select the version of the NotifyStubFailure builtin that either saves
+ // or does not save the double registers, depending on CPU features.
+ Code* NotifyStubFailureBuiltin();
+
Isolate* isolate_;
JSFunction* function_;
Code* compiled_code_;
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ popad();
// Tear down internal frame.
}
}
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ if (Serializer::enabled()) {
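+    // While building the snapshot, temporarily treat SSE2 as supported so
+    // the builtin is generated with the kSaveFPRegs code path.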
+ PlatformFeatureScope sse2(SSE2);
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+ } else {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+ }
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
}
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
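+  // Without SSE2 no XMM registers can have been clobbered, so the plain
+  // builtin suffices.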
+ Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
+ Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
+ return isolate_->builtins()->builtin(name);
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
#endif
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ CpuFeatureScope scope(masm(), SSE2);
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ movsd(MemOperand(esp, count * kDoubleSize),
+ XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ CpuFeatureScope scope(masm(), SSE2);
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(esp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- Comment(";;; Save clobbered callee double registers");
- CpuFeatureScope scope(masm(), SSE2);
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ movsd(MemOperand(esp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
- save_iterator.Advance();
- count++;
- }
+ SaveCallerDoubles();
}
}
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
}
if (jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
if (needs_frame.is_bound()) {
__ jmp(&needs_frame);
__ ret(0); // Call the continuation without clobbering registers.
}
} else {
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+ RestoreCallerDoubles();
+ }
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- ASSERT(NeedsEagerFrame());
- CpuFeatureScope scope(masm(), SSE2);
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(esp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ RestoreCallerDoubles();
}
if (dynamic_frame_alignment_) {
// Fetch the state of the dynamic frame alignment.
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
if (IsMarked(code) && !code->marked_for_deoptimization()) {
code->set_marked_for_deoptimization(true);
+ code->InvalidateEmbeddedObjects();
have_code_to_deoptimize_ = true;
}
entries->clear_at(i);
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)));
+ Assembler::target_address_at(pc_ + Assembler::kInstrSize));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)) =
- stub->instruction_start();
+ Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
+ stub->instruction_start());
}
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
- __ mov(a0, ra);
- // Adjust a0 to point to the head of the PlatformCodeAge sequence
+ // Set a0 to point to the head of the PlatformCodeAge sequence.
__ Subu(a0, a0,
Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
- // Restore the original return address of the function
- __ mov(ra, at);
// The following registers must be saved and restored when calling through to
// the runtime:
// save/restore the registers without worrying about which of them contain
// pointers.
- __ mov(a0, ra);
- // Adjust a0 to point to the head of the PlatformCodeAge sequence
+ // Set a0 to point to the head of the PlatformCodeAge sequence.
__ Subu(a0, a0,
Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
- // Restore the original return address of the function
- __ mov(ra, at);
// The following registers must be saved and restored when calling through to
// the runtime:
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
}
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
- Address target_address = Memory::Address_at(
- sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+ Address target_address = Assembler::target_address_at(
+ sequence + Assembler::kInstrSize);
Code* stub = GetCodeFromTargetAddress(target_address);
GetCodeAgeAndParity(stub, age, parity);
}
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
- // Mark this code sequence for FindPlatformCodeAgeSequence()
+ // Mark this code sequence for FindPlatformCodeAgeSequence().
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
- // Save the function's original return address
- // (it will be clobbered by Call(t9))
- patcher.masm()->mov(at, ra);
- // Load the stub address to t9 and call it
- patcher.masm()->li(t9,
- Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
- patcher.masm()->Call(t9);
- // Record the stub address in the empty space for GetCodeAgeAndParity()
- patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ // Load the stub address into t9 and call it.
+ // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ patcher.masm()->li(
+ t9,
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+ CONSTANT_SIZE);
+ patcher.masm()->nop(); // Prevent jalr to jal optimization.
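+  // Linking into a0 instead of ra keeps the function's original return
+  // address intact while handing the stub the sequence address in a0.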
+ patcher.masm()->jalr(t9, a0);
+ patcher.masm()->nop(); // Branch delay slot nop.
+ patcher.masm()->nop(); // Pad the empty space.
}
}
}
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+ return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
#define __ masm()->
}
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
}
if (info()->saves_caller_doubles()) {
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ SaveCallerDoubles();
}
// Possibly allocate a local context.
}
__ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
if (deopt_jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
if (needs_frame.is_bound()) {
__ Branch(&needs_frame);
} else {
__ Call(t9);
}
} else {
+ if (info()->saves_caller_doubles()) {
+ ASSERT(info()->IsStub());
+ RestoreCallerDoubles();
+ }
__ Call(t9);
}
}
}
ASSERT(info()->IsStub() || frame_is_built_);
- if (condition == al && frame_is_built_) {
+ // Go through the jump table if we need to handle a condition, build a
+ // frame, or restore caller doubles.
+ if (condition == al && frame_is_built_ &&
+ !info()->saves_caller_doubles()) {
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
} else {
// We often have several deopts to the same entry, reuse the last
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
- ASSERT(NeedsEagerFrame());
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ RestoreCallerDoubles();
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ Branch(&receiver_ok);
__ bind(&global_object);
- __ lw(receiver, GlobalObjectOperand());
+ __ lw(receiver, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ lw(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
__ lw(receiver,
FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
// Pre-age the code.
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
nop(Assembler::CODE_AGE_MARKER_NOP);
- // Save the function's original return address
- // (it will be clobbered by Call(t9))
- mov(at, ra);
- // Load the stub address to t9 and call it
+ // Load the stub address into t9 and call it.
+ // GetCodeAgeAndParity() extracts the stub address from this instruction.
li(t9,
- Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
- Call(t9);
- // Record the stub address in the empty space for GetCodeAgeAndParity()
- dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+ CONSTANT_SIZE);
+ nop(); // Prevent jalr to jal optimization.
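+  // Link into a0 instead of ra so the function's return address survives
+  // the call to the code age stub.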
+ jalr(t9, a0);
+ nop(); // Branch delay slot nop.
+ nop(); // Pad the empty space.
} else {
Push(ra, fp, cp, a1);
nop(Assembler::CODE_AGE_SEQUENCE_NOP);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
int64_t& i64hilo,
uint64_t& u64hilo,
int32_t& next_pc,
+ int32_t& return_addr_reg,
bool& do_interrupt) {
// Every local variable declared here needs to be const.
// This is to make sure that changed values are sent back to
case JR:
case JALR:
next_pc = get_register(instr->RsValue());
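+      // JALR encodes its link register in the rd field (ra by default),
+      // so remember it instead of hardcoding register 31.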
+ return_addr_reg = instr->RdValue();
break;
case SLL:
alu_out = rt << sa;
int32_t current_pc = get_pc();
// Next pc
int32_t next_pc = 0;
+ int32_t return_addr_reg = 31;
// Set up the variables if needed before executing the instruction.
ConfigureTypeRegister(instr,
i64hilo,
u64hilo,
next_pc,
+ return_addr_reg,
do_interrupt);
// ---------- Raise exceptions triggered.
Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
- set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ set_register(return_addr_reg,
+ current_pc + 2 * Instruction::kInstrSize);
set_pc(next_pc);
pc_modified_ = true;
break;
int64_t& i64hilo,
uint64_t& u64hilo,
int32_t& next_pc,
+ int32_t& return_addr_reg,
bool& do_interrupt);
void DecodeTypeImmediate(Instruction* instr);
}
+void Code::InvalidateEmbeddedObjects() {
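+  // Overwrite all embedded object pointers with undefined so that code
+  // marked for deoptimization no longer references them.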
+ Object* undefined = GetHeap()->undefined_value();
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
+ }
+ }
+}
+
+
void Code::Relocate(intptr_t delta) {
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta);
// [relocation_info]: Code relocation information
DECL_ACCESSORS(relocation_info, ByteArray)
void InvalidateRelocation();
+ void InvalidateEmbeddedObjects();
// [handler_table]: Fixed array containing offsets of exception handlers.
DECL_ACCESSORS(handler_table, FixedArray)
containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
}
int old_counter = containing_chunk->store_buffer_counter();
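+  // Compare with >= rather than ==, presumably to guard against a counter
+  // that has already passed the threshold.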
- if (old_counter == threshold) {
+ if (old_counter >= threshold) {
containing_chunk->set_scan_on_scavenge(true);
created_new_scan_on_scavenge_pages = true;
}
#define MAJOR_VERSION 3
#define MINOR_VERSION 22
#define BUILD_NUMBER 24
-#define PATCH_LEVEL 10
+#define PATCH_LEVEL 17
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ Popad();
// Tear down internal frame.
}
}
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
}
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+ return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
#endif
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ movsd(MemOperand(rsp, count * kDoubleSize),
+ XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(rsp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
}
if (info()->saves_caller_doubles()) {
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ movsd(MemOperand(rsp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
- save_iterator.Advance();
- count++;
- }
+ SaveCallerDoubles();
}
}
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
}
if (jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
__ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
if (needs_frame.is_bound()) {
__ jmp(&needs_frame);
__ call(kScratchRegister);
}
} else {
+ if (info()->saves_caller_doubles()) {
+ ASSERT(info()->IsStub());
+ RestoreCallerDoubles();
+ }
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
}
ASSERT(info()->IsStub() || frame_is_built_);
- if (cc == no_condition && frame_is_built_) {
+ // Go through the jump table if we need to handle a condition, build a
+ // frame, or restore caller doubles.
+ if (cc == no_condition && frame_is_built_ &&
+ !info()->saves_caller_doubles()) {
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
- ASSERT(NeedsEagerFrame());
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(rsp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ RestoreCallerDoubles();
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
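+// A Date constructed in one realm must not be affected by another realm's
+// Date cache.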
+var contextA = Realm.create();
+var date1 = Realm.eval(contextA, "new Date('Thu, 29 Aug 2013 00:00:00 UTC')");
+new Date('Thu, 29 Aug 2013 00:00:01 UTC');
+var date2 = Realm.eval(contextA, "new Date('Thu, 29 Aug 2013 00:00:00 UTC')");
+assertEquals(date1, date2);
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test an Array constructor call with enough arguments to exceed the
+// Heap::MaxRegularSpaceAllocationSize limit.
+
+function boom() {
+ var args = [];
+ for (var i = 0; i < 125000; i++) {
+ args.push(i);
+ }
+ return Array.apply(Array, args);
+}
+
+var array = boom();
+
+assertEquals(125000, array.length);
+assertEquals(124999, array[124999]);
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
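+// Regression test: applying the same function with receivers that include
+// undefined and null should not crash once the caller is optimized.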
+function bar(a, b) { with(a) {return a + b;} }
+
+var obj = {
+ functions: [bar, bar, bar, bar],
+ receivers: [bar, bar, undefined, null],
+ foo: function () {
+ for (var a = this.functions, e = this.receivers, c = a.length,
+ d = 0; d < c ; d++) {
+ a[d].apply(e[d], arguments)
+ }
+ }
+}
+
+obj.foo(1, 2, 3, 4);
+obj.foo(1, 2, 3, 4);
+%OptimizeFunctionOnNextCall(obj.foo);
+obj.foo(1, 2, 3, 4);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc
+
+function boom() {
+ var args = [];
+ for (var i = 0; i < 125000; i++)
+ args.push(i);
+ return Array.apply(Array, args);
+}
+var array = boom();
+function fib(n) {
+ var f0 = 0, f1 = 1;
+ for (; n > 0; n = n - 1) {
+ f0 + f1;
+ f0 = array;
+ }
+}
+fib(12);
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
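+// Optimized code that embeds a constant function found on the prototype
+// chain must deoptimize when that prototype function is replaced.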
+function f() {
+ return 1;
+}
+function C1(f) {
+ this.f = f;
+}
+var o1 = new C1(f);
+var o2 = {__proto__: new C1(f) }
+function foo(o) {
+ return o.f();
+}
+foo(o1);
+foo(o1);
+foo(o2);
+foo(o1);
+var o3 = new C1(function() { return 2; });
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(1, foo(o2));
+o2.__proto__.f = function() { return 3; };
+assertEquals(3, foo(o2));