// handle this a bit differently.
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
+ __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ str(fp, MemOperand(ip));
+
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load an immediate
__ stop("trap_on_deopt", condition);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (condition == al && frame_is_built_ &&
!info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
saved_registers.Combine(fp);
__ PushCPURegList(saved_registers);
+ __ Mov(x3, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ Str(fp, MemOperand(x3));
+
const int kSavedRegistersAreaSize =
(saved_registers.Count() * kXRegSize) +
(saved_fp_registers.Count() * kDRegSize);
__ Bind(&table_entry->label);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load the base
__ Bind(&dont_trap);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to build frame, or restore caller doubles.
if (branch_type == always &&
frame_is_built_ && !info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
Deoptimizer::JumpTableEntry* table_entry =
- new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
- !frame_is_built_);
+ new (zone()) Deoptimizer::JumpTableEntry(
+ entry, deopt_info, bailout_type, !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
}
+// Applies a queued deopt event to the profiler's code map: attaches the
+// recorded deopt reason string and raw source position to the CodeEntry
+// registered at |start|.
+void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
+ CodeEntry* entry = code_map->FindEntry(start);
+ // The code object may no longer be tracked (e.g. already moved/removed);
+ // in that case the event is silently dropped.
+ if (entry != NULL) {
+ entry->set_deopt_reason(deopt_reason);
+ entry->set_deopt_location(raw_position);
+ }
+}
+
+
void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->MoveCode(from, to);
}
#include "src/cpu-profiler-inl.h"
#include "src/compiler.h"
+#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/hashmap.h"
#include "src/log-inl.h"
}
+// Records a synthetic tick sample for the moment of deoptimization. The
+// register state is reconstructed rather than sampled: fp comes from the
+// saved C entry frame pointer, sp is derived from it via |fp_to_sp_delta|,
+// and pc is the |from| address the deoptimizer bailed out at.
+void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
+ int fp_to_sp_delta) {
+ TickSampleEventRecord record(last_code_event_id_);
+ RegisterState regs;
+ Address fp = isolate->c_entry_fp(isolate->thread_local_top());
+ regs.sp = fp - fp_to_sp_delta;
+ regs.fp = fp;
+ regs.pc = from;
+ // Skip the CEntry frame so the sample starts at the deoptimizing JS frame.
+ record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame);
+ ticks_from_vm_buffer_.Enqueue(record);
+}
+
+
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
TickSampleEventRecord record(last_code_event_id_);
RegisterState regs;
}
+// Profiler hook invoked when optimized code deoptimizes. Resolves the
+// human-readable reason and raw source position for |bailout_id| via
+// Deoptimizer::GetDeoptInfo, enqueues a CODE_DEOPT record for the code
+// map, and then records the stack at the deopt point.
+void CpuProfiler::CodeDeoptEvent(Code* code, int bailout_id, Address pc,
+ int fp_to_sp_delta) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
+ CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
+ Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, bailout_id);
+ rec->start = code->address();
+ rec->deopt_reason = Deoptimizer::GetDeoptReason(info.deopt_reason);
+ rec->raw_position = info.raw_position;
+ processor_->Enqueue(evt_rec);
+ // Must follow the Enqueue above so the deopt stack sample is processed
+ // after the CODE_DEOPT record it belongs to.
+ processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
+}
+
+
void CpuProfiler::CodeDeleteEvent(Address from) {
}
class CpuProfilesCollection;
class ProfileGenerator;
-#define CODE_EVENTS_TYPE_LIST(V) \
- V(CODE_CREATION, CodeCreateEventRecord) \
- V(CODE_MOVE, CodeMoveEventRecord) \
- V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
- V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord) \
- V(REPORT_BUILTIN, ReportBuiltinEventRecord)
+#define CODE_EVENTS_TYPE_LIST(V) \
+ V(CODE_CREATION, CodeCreateEventRecord) \
+ V(CODE_MOVE, CodeMoveEventRecord) \
+ V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
+ V(CODE_DEOPT, CodeDeoptEventRecord) \
+ V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord) \
+ V(REPORT_BUILTIN, ReportBuiltinEventRecord)
class CodeEventRecord {
};
+// Event record describing a deoptimization of the code object starting at
+// |start|; carries the textual reason and the raw source position so
+// UpdateCodeMap can attach them to the matching CodeEntry.
+class CodeDeoptEventRecord : public CodeEventRecord {
+ public:
+ Address start;
+ const char* deopt_reason;
+ int raw_position;
+
+ INLINE(void UpdateCodeMap(CodeMap* code_map));
+};
+
+
class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
public:
Address from;
// Puts current stack into tick sample events buffer.
void AddCurrentStack(Isolate* isolate);
+ void AddDeoptStack(Isolate* isolate, Address from, int fp_to_sp_delta);
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
virtual void CodeMovingGCEvent() {}
virtual void CodeMoveEvent(Address from, Address to);
virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared);
+ virtual void CodeDeoptEvent(Code* code, int bailout_id, Address pc,
+ int fp_to_sp_delta);
virtual void CodeDeleteEvent(Address from);
virtual void GetterCallbackEvent(Name* name, Address entry_point);
virtual void RegExpCodeCreateEvent(Code* code, String* source);
}
}
compiled_code_ = FindOptimizedCode(function, optimized_code);
-
#if DEBUG
DCHECK(compiled_code_ != NULL);
if (type == EAGER || type == SOFT || type == LAZY) {
CHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif // DEBUG
+ if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
+ PROFILE(isolate_, CodeDeoptEvent(compiled_code_, bailout_id_, from_,
+ fp_to_sp_delta_));
+ }
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
input_->SetFrameType(frame_type);
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
- // Print some helpful diagnostic information.
- if (FLAG_log_timer_events &&
- compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
- LOG(isolate(), CodeDeoptEvent(compiled_code_));
- }
base::ElapsedTimer timer;
// Determine basic deoptimization information. The optimized frame is
#undef DEOPT_MESSAGES_TEXTS
return deopt_messages_[deopt_reason];
}
+
+
+// Scans |code|'s relocation info to find the deopt reason and source
+// position associated with |bailout_id|. POSITION and DEOPT_REASON entries
+// precede the RUNTIME_ENTRY they describe, so the walk remembers the last
+// seen values and returns them when a runtime entry's deoptimization id
+// (looked up in the EAGER, SOFT and LAZY tables) matches |bailout_id|.
+// Returns a (0, NULL, kNoReason) sentinel when no match is found. The
+// mnemonic slot is always NULL here — it is not recoverable from reloc info.
+Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, int bailout_id) {
+ int last_position = 0;
+ Isolate* isolate = code->GetIsolate();
+ Deoptimizer::DeoptReason last_reason = Deoptimizer::kNoReason;
+ int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
+ RelocInfo::ModeMask(RelocInfo::POSITION) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ for (RelocIterator it(code, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (info->rmode() == RelocInfo::POSITION) {
+ last_position = static_cast<int>(info->data());
+ } else if (info->rmode() == RelocInfo::DEOPT_REASON) {
+ last_reason = static_cast<Deoptimizer::DeoptReason>(info->data());
+ } else if (last_reason != Deoptimizer::kNoReason) {
+ // Only RUNTIME_ENTRY modes remain under the mask; check the entry
+ // against each bailout table since the type is not known here.
+ if ((bailout_id ==
+ Deoptimizer::GetDeoptimizationId(isolate, info->target_address(),
+ Deoptimizer::EAGER)) ||
+ (bailout_id ==
+ Deoptimizer::GetDeoptimizationId(isolate, info->target_address(),
+ Deoptimizer::SOFT)) ||
+ (bailout_id ==
+ Deoptimizer::GetDeoptimizationId(isolate, info->target_address(),
+ Deoptimizer::LAZY))) {
+ CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
+ return DeoptInfo(last_position, NULL, last_reason);
+ }
+ }
+ }
+ return DeoptInfo(0, NULL, Deoptimizer::kNoReason);
+}
} } // namespace v8::internal
static const char* GetDeoptReason(DeoptReason deopt_reason);
- struct Reason {
- Reason(int r, const char* m, DeoptReason d)
+ struct DeoptInfo {
+ DeoptInfo(int r, const char* m, DeoptReason d)
: raw_position(r), mnemonic(m), deopt_reason(d) {}
- bool operator==(const Reason& other) const {
+ bool operator==(const DeoptInfo& other) const {
return raw_position == other.raw_position &&
CStringEquals(mnemonic, other.mnemonic) &&
deopt_reason == other.deopt_reason;
}
- bool operator!=(const Reason& other) const { return !(*this == other); }
+ bool operator!=(const DeoptInfo& other) const { return !(*this == other); }
int raw_position;
const char* mnemonic;
DeoptReason deopt_reason;
};
+ static DeoptInfo GetDeoptInfo(Code* code, int bailout_id);
+
struct JumpTableEntry : public ZoneObject {
- inline JumpTableEntry(Address entry, const Reason& the_reason,
+ inline JumpTableEntry(Address entry, const DeoptInfo& deopt_info,
Deoptimizer::BailoutType type, bool frame)
: label(),
address(entry),
- reason(the_reason),
+ deopt_info(deopt_info),
bailout_type(type),
needs_frame(frame) {}
bool IsEquivalentTo(const JumpTableEntry& other) const {
return address == other.address && bailout_type == other.bailout_type &&
needs_frame == other.needs_frame &&
- (!FLAG_trace_deopt || reason == other.reason);
+ (!FLAG_trace_deopt || deopt_info == other.deopt_info);
}
Label label;
Address address;
- Reason reason;
+ DeoptInfo deopt_info;
Deoptimizer::BailoutType bailout_type;
bool needs_frame;
};
__ pushad();
+ ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
+ __ mov(Operand::StaticVariable(c_entry_fp_address), ebp);
+
const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
kDoubleRegsSize;
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
__ bind(&done);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
}
-void LCodeGenBase::DeoptComment(const Deoptimizer::Reason& reason) {
- masm()->RecordDeoptReason(reason.deopt_reason, reason.raw_position);
+// Records the deopt reason and raw source position with the assembler so
+// the bailout can later be mapped back to its cause (consumed, e.g., by
+// Deoptimizer::GetDeoptInfo's reloc-info walk — TODO confirm the exact
+// emission mode in MacroAssembler::RecordDeoptReason).
+void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) {
+ masm()->RecordDeoptReason(deopt_info.deopt_reason, deopt_info.raw_position);
}
HGraph* graph() const;
void FPRINTF_CHECKING Comment(const char* format, ...);
- void DeoptComment(const Deoptimizer::Reason& reason);
+ void DeoptComment(const Deoptimizer::DeoptInfo& deopt_info);
bool GenerateBody();
virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
}
-void Logger::CodeDeoptEvent(Code* code) {
- if (!log_->IsEnabled()) return;
- DCHECK(FLAG_log_internal_timer_events);
+void Logger::CodeDeoptEvent(Code* code, int bailout_id, Address from,
+ int fp_to_sp_delta) {
+ PROFILER_LOG(CodeDeoptEvent(code, bailout_id, from, fp_to_sp_delta));
+ if (!log_->IsEnabled() || !FLAG_log_internal_timer_events) return;
Log::MessageBuilder msg(log_);
int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
msg.Append("code-deopt,%ld,%d", since_epoch, code->CodeSize());
uintptr_t start,
uintptr_t end);
- void CodeDeoptEvent(Code* code);
+ void CodeDeoptEvent(Code* code, int bailout_id, Address from,
+ int fp_to_sp_delta);
void CurrentTimeEvent();
void TimerEvent(StartEnd se, const char* name);
}
}
+ __ li(a2, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ sw(fp, MemOperand(a2));
+
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load an immediate
__ bind(&skip);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (condition == al && frame_is_built_ &&
!info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
__ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ bind(&skip);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (condition == al && frame_is_built_ &&
!info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
void Code::PrintDeoptLocation(FILE* out, int bailout_id) {
- int last_position = 0;
- Deoptimizer::DeoptReason last_reason = Deoptimizer::kNoReason;
- int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
- RelocInfo::ModeMask(RelocInfo::POSITION) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->rmode() == RelocInfo::POSITION) {
- last_position = static_cast<int>(info->data());
- } else if (info->rmode() == RelocInfo::DEOPT_REASON) {
- last_reason = static_cast<Deoptimizer::DeoptReason>(info->data());
- } else if (last_reason != Deoptimizer::kNoReason) {
- if ((bailout_id == Deoptimizer::GetDeoptimizationId(
- GetIsolate(), info->target_address(), Deoptimizer::EAGER)) ||
- (bailout_id == Deoptimizer::GetDeoptimizationId(
- GetIsolate(), info->target_address(), Deoptimizer::SOFT)) ||
- (bailout_id == Deoptimizer::GetDeoptimizationId(
- GetIsolate(), info->target_address(), Deoptimizer::LAZY))) {
- CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
- PrintF(out, " ;;; deoptimize at %d: %s\n", last_position,
- Deoptimizer::GetDeoptReason(last_reason));
- return;
- }
- }
+ // The reloc-info walk previously inlined here now lives in
+ // Deoptimizer::GetDeoptInfo; only print when the lookup produced
+ // something other than the default (kNoReason, position 0) sentinel.
+ Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(this, bailout_id);
+ if (info.deopt_reason != Deoptimizer::kNoReason || info.raw_position != 0) {
+ PrintF(out, " ;;; deoptimize at %d: %s\n", info.raw_position,
+ Deoptimizer::GetDeoptReason(info.deopt_reason));
}
}
DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load an immediate
__ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
script_id_(v8::UnboundScript::kNoScriptId),
no_frame_ranges_(NULL),
bailout_reason_(kEmptyBailoutReason),
+ deopt_reason_(kEmptyBailoutReason),
+ deopt_location_(0),
line_info_(line_info),
instruction_start_(instruction_start) {}
void set_bailout_reason(const char* bailout_reason) {
bailout_reason_ = bailout_reason;
}
+ void set_deopt_reason(const char* deopt_reason) {
+ deopt_reason_ = deopt_reason;
+ }
+ void set_deopt_location(int location) { deopt_location_ = location; }
const char* bailout_reason() const { return bailout_reason_; }
static inline bool is_js_function_tag(Logger::LogEventsAndTags tag);
int script_id_;
List<OffsetRange>* no_frame_ranges_;
const char* bailout_reason_;
+ const char* deopt_reason_;
+ int deopt_location_;
JITLineInfoTable* line_info_;
Address instruction_start_;
const int kSavedRegistersAreaSize = kNumberOfRegisters * kRegisterSize +
kDoubleRegsSize;
+ __ Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
+
// We use this to keep the value of the fifth argument temporarily.
// Unfortunately we can't store it directly in r8 (used for passing
// this on linux), since it is another parameter passing register on windows.
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
__ bind(&done);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (cc == no_condition && frame_is_built_ &&
!info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
__ bind(&done);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
}
+// Test helper: walks a straight chain of profile-tree children, following
+// the child named names[i] at each level, and returns the final node.
+// GetChild is expected to fail the test if a named child is missing —
+// TODO confirm against its definition elsewhere in this test file.
+static const v8::CpuProfileNode* GetSimpleBranch(v8::Isolate* isolate,
+ const v8::CpuProfileNode* node,
+ const char* names[],
+ int length) {
+ for (int i = 0; i < length; i++) {
+ node = GetChild(isolate, node, names[i]);
+ }
+ return node;
+}
+
+
static const char* cpu_profiler_test_source = "function loop(timeout) {\n"
" this.mmm = 0;\n"
" var start = Date.now();\n"
outer_profile = NULL;
CHECK_EQ(0, iprofiler->GetProfilesCount());
}
+
+
+static const char* collect_deopt_events_test_source =
+ "function opt_function(value) {\n"
+ " return value / 10;\n"
+ "}\n"
+ "\n"
+ "function test(value) {\n"
+ " return opt_function(value);\n"
+ "}\n"
+ "\n"
+ "startProfiling();\n"
+ "\n"
+ "for (var i = 0; i < 10; ++i) test(10);\n"
+ "\n"
+ "%OptimizeFunctionOnNextCall(opt_function)\n"
+ "\n"
+ "for (var i = 0; i < 10; ++i) test(10);\n"
+ "\n"
+ "for (var i = 0; i < 10; ++i) test(undefined);\n"
+ "\n"
+ "stopProfiling();\n"
+ "\n";
+
+
+// Profiles a script that optimizes opt_function and then deopts it by
+// calling test(undefined), and checks that the ""->test->opt_function
+// branch shows up in the collected top-down profile tree.
+// NOTE(review): the test only asserts the node exists; it does not yet
+// verify the recorded deopt reason or location on the node.
+TEST(CollectDeoptEvents) {
+ // Deopt events are only produced for crankshafted code, and always_opt
+ // would change the optimize/deopt sequence the script relies on.
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ // Needed for %OptimizeFunctionOnNextCall in the test source.
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
+ i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
+
+ // The script itself starts and stops profiling via the profiler extension.
+ v8::Script::Compile(v8_str(collect_deopt_events_test_source))->Run();
+ i::CpuProfile* iprofile = iprofiler->GetProfile(0);
+ iprofile->Print();
+ v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);
+ const char* branch[] = {"", "test", "opt_function"};
+ const v8::CpuProfileNode* opt_function = GetSimpleBranch(
+ env->GetIsolate(), profile->GetTopDownRoot(), branch, arraysize(branch));
+ CHECK(opt_function);
+ iprofiler->DeleteProfile(iprofile);
+}