// value with DontDelete properties. We have to deoptimize all contexts
// because of possible cross-context inlined functions.
if (self->IsJSGlobalProxy() || self->IsGlobalObject()) {
- i::Deoptimizer::DeoptimizeAll();
+ i::Deoptimizer::DeoptimizeAll(isolate);
}
EXCEPTION_PREAMBLE(isolate);
void Testing::DeoptimizeAll() {
i::Isolate* isolate = i::Isolate::Current();
i::HandleScope scope(isolate);
- internal::Deoptimizer::DeoptimizeAll();
+ internal::Deoptimizer::DeoptimizeAll(isolate);
}
bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt;
Deoptimizer::BailoutType type =
is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- int id = Deoptimizer::GetDeoptimizationId(entry, type);
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
Comment(";;; jump table entry %d.", i);
} else {
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
- Handle<ByteArray> translations = translations_.CreateByteArray();
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
}
-void RelocInfo::Print(FILE* out) {
+void RelocInfo::Print(Isolate* isolate, FILE* out) {
PrintF(out, "%p %s", pc_, RelocModeName(rmode_));
if (IsComment(rmode_)) {
PrintF(out, " (%s)", reinterpret_cast<char*>(data_));
} else if (IsPosition(rmode_)) {
PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
} else if (IsRuntimeEntry(rmode_) &&
- Isolate::Current()->deoptimizer_data() != NULL) {
+ isolate->deoptimizer_data() != NULL) {
// Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
- target_address(), Deoptimizer::EAGER);
+ isolate, target_address(), Deoptimizer::EAGER);
if (id != Deoptimizer::kNotDeoptimizationEntry) {
PrintF(out, " (deoptimization bailout %d)", id);
}
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* RelocModeName(Mode rmode);
- void Print(FILE* out);
+ void Print(Isolate* isolate, FILE* out);
#endif // ENABLE_DISASSEMBLER
#ifdef VERIFY_HEAP
void Verify();
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
- Deoptimizer::DeoptimizeAll();
+ Deoptimizer::DeoptimizeAll(isolate_);
Handle<Code> lazy_compile =
Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
namespace v8 {
namespace internal {
-DeoptimizerData::DeoptimizerData() {
- eager_deoptimization_entry_code_entries_ = -1;
- lazy_deoptimization_entry_code_entries_ = -1;
- size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
- MemoryAllocator* allocator = Isolate::Current()->memory_allocator();
- size_t initial_commit_size = OS::CommitPageSize();
- eager_deoptimization_entry_code_ =
- allocator->AllocateChunk(deopt_table_size,
- initial_commit_size,
- EXECUTABLE,
- NULL);
- lazy_deoptimization_entry_code_ =
- allocator->AllocateChunk(deopt_table_size,
- initial_commit_size,
- EXECUTABLE,
- NULL);
- current_ = NULL;
- deoptimizing_code_list_ = NULL;
+static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
+ return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
+ OS::CommitPageSize(),
+ EXECUTABLE,
+ NULL);
+}
+
+
+DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
+ : allocator_(allocator),
+ eager_deoptimization_entry_code_entries_(-1),
+ lazy_deoptimization_entry_code_entries_(-1),
+ eager_deoptimization_entry_code_(AllocateCodeChunk(allocator)),
+ lazy_deoptimization_entry_code_(AllocateCodeChunk(allocator)),
+ current_(NULL),
#ifdef ENABLE_DEBUGGER_SUPPORT
- deoptimized_frame_info_ = NULL;
+ deoptimized_frame_info_(NULL),
#endif
-}
+ deoptimizing_code_list_(NULL) { }
DeoptimizerData::~DeoptimizerData() {
- Isolate::Current()->memory_allocator()->Free(
- eager_deoptimization_entry_code_);
+ allocator_->Free(eager_deoptimization_entry_code_);
eager_deoptimization_entry_code_ = NULL;
- Isolate::Current()->memory_allocator()->Free(
- lazy_deoptimization_entry_code_);
+ allocator_->Free(lazy_deoptimization_entry_code_);
lazy_deoptimization_entry_code_ = NULL;
DeoptimizingCodeListNode* current = deoptimizing_code_list_;
Address from,
int fp_to_sp_delta,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
Deoptimizer* deoptimizer = new Deoptimizer(isolate,
function,
type,
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
Deoptimizer* result = isolate->deoptimizer_data()->current_;
ASSERT(result != NULL);
result->DeleteFrameDescriptions();
JavaScriptFrame* frame,
int jsframe_index,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
ASSERT(frame->is_optimized());
ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
delete info;
isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
void Deoptimizer::VisitAllOptimizedFunctions(
+ Isolate* isolate,
OptimizedFunctionVisitor* visitor) {
AssertNoAllocation no_allocation;
// Run through the list of all native contexts and deoptimize.
- Object* context = Isolate::Current()->heap()->native_contexts_list();
+ Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined()) {
VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
};
-void Deoptimizer::DeoptimizeAll() {
+void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
AssertNoAllocation no_allocation;
if (FLAG_trace_deopt) {
}
DeoptimizeAllFilter filter;
- DeoptimizeAllFunctionsWith(&filter);
+ DeoptimizeAllFunctionsWith(isolate, &filter);
}
}
-void Deoptimizer::DeoptimizeAllFunctionsWith(OptimizedFunctionFilter* filter) {
+void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate,
+ OptimizedFunctionFilter* filter) {
AssertNoAllocation no_allocation;
// Run through the list of all native contexts and deoptimize.
- Object* context = Isolate::Current()->heap()->native_contexts_list();
+ Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined()) {
DeoptimizeAllFunctionsForContext(Context::cast(context), filter);
context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
GetEntryMode mode) {
ASSERT(id >= 0);
if (id >= kMaxNumberOfEntries) return NULL;
- MemoryChunk* base = NULL;
if (mode == ENSURE_ENTRY_CODE) {
EnsureCodeForDeoptimizationEntry(isolate, type, id);
} else {
ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
}
DeoptimizerData* data = isolate->deoptimizer_data();
- if (type == EAGER) {
- base = data->eager_deoptimization_entry_code_;
- } else {
- base = data->lazy_deoptimization_entry_code_;
- }
+ MemoryChunk* base = (type == EAGER)
+ ? data->eager_deoptimization_entry_code_
+ : data->lazy_deoptimization_entry_code_;
return base->area_start() + (id * table_entry_size_);
}
-int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
- MemoryChunk* base = NULL;
- DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
- if (type == EAGER) {
- base = data->eager_deoptimization_entry_code_;
- } else {
- base = data->lazy_deoptimization_entry_code_;
- }
+int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
+ Address addr,
+ BailoutType type) {
+ DeoptimizerData* data = isolate->deoptimizer_data();
+ MemoryChunk* base = (type == EAGER)
+ ? data->eager_deoptimization_entry_code_
+ : data->lazy_deoptimization_entry_code_;
Address start = base->area_start();
if (base == NULL ||
addr < start ||
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- MemoryChunk* chunk = type == EAGER
+ MemoryChunk* chunk = (type == EAGER)
? data->eager_deoptimization_entry_code_
: data->lazy_deoptimization_entry_code_;
ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function,
Code* code) {
SharedFunctionInfo* shared = function->shared();
- Object* undefined = Isolate::Current()->heap()->undefined_value();
+ Object* undefined = function->GetHeap()->undefined_value();
Object* current = function;
while (current != undefined) {
}
-Handle<ByteArray> TranslationBuffer::CreateByteArray() {
+Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
int length = contents_.length();
- Handle<ByteArray> result =
- Isolate::Current()->factory()->NewByteArray(length, TENURED);
+ Handle<ByteArray> result = factory->NewByteArray(length, TENURED);
memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
return result;
}
DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = code->GetIsolate()->global_handles();
// Globalize the code object and make it weak.
code_ = Handle<Code>::cast(global_handles->Create(code));
global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = code_->GetIsolate()->global_handles();
global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
}
expression_stack_ = new Object*[expression_count_];
// Get the source position using the unoptimized code.
Address pc = reinterpret_cast<Address>(output_frame->GetPc());
- Code* code = Code::cast(Isolate::Current()->heap()->FindCodeObject(pc));
+ Code* code = Code::cast(deoptimizer->isolate()->heap()->FindCodeObject(pc));
source_position_ = code->SourcePosition(pc);
for (int i = 0; i < expression_count_; i++) {
class DeoptimizerData {
public:
- DeoptimizerData();
+ explicit DeoptimizerData(MemoryAllocator* allocator);
~DeoptimizerData();
#ifdef ENABLE_DEBUGGER_SUPPORT
void RemoveDeoptimizingCode(Code* code);
private:
+ MemoryAllocator* allocator_;
int eager_deoptimization_entry_code_entries_;
int lazy_deoptimization_entry_code_entries_;
MemoryChunk* eager_deoptimization_entry_code_;
static void ReplaceCodeForRelatedFunctions(JSFunction* function, Code* code);
// Deoptimize all functions in the heap.
- static void DeoptimizeAll();
+ static void DeoptimizeAll(Isolate* isolate);
static void DeoptimizeGlobalObject(JSObject* object);
- static void DeoptimizeAllFunctionsWith(OptimizedFunctionFilter* filter);
+ static void DeoptimizeAllFunctionsWith(Isolate* isolate,
+ OptimizedFunctionFilter* filter);
static void DeoptimizeAllFunctionsForContext(
Context* context, OptimizedFunctionFilter* filter);
static void VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor);
- static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
+ static void VisitAllOptimizedFunctions(Isolate* isolate,
+ OptimizedFunctionVisitor* visitor);
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
int id,
BailoutType type,
GetEntryMode mode = ENSURE_ENTRY_CODE);
- static int GetDeoptimizationId(Address addr, BailoutType type);
+ static int GetDeoptimizationId(Isolate* isolate,
+ Address addr,
+ BailoutType type);
static int GetOutputInfo(DeoptimizationOutputData* data,
BailoutId node_id,
SharedFunctionInfo* shared);
BailoutType type,
int max_entry_id);
+ Isolate* isolate() const { return isolate_; }
+
private:
static const int kMinNumberOfEntries = 64;
static const int kMaxNumberOfEntries = 16384;
int CurrentIndex() const { return contents_.length(); }
void Add(int32_t value, Zone* zone);
- Handle<ByteArray> CreateByteArray();
+ Handle<ByteArray> CreateByteArray(Factory* factory);
private:
ZoneList<uint8_t> contents_;
isolate->deoptimizer_data() != NULL) {
// A runtime entry relocinfo might be a deoptimization bailout.
Address addr = relocinfo.target_address();
- int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
+ int id = Deoptimizer::GetDeoptimizationId(isolate,
+ addr,
+ Deoptimizer::EAGER);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
- id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::LAZY);
+ id = Deoptimizer::GetDeoptimizationId(isolate,
+ addr,
+ Deoptimizer::LAZY);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
} else {
bool is_lazy_deopt = jump_table_[i].is_lazy_deopt;
Deoptimizer::BailoutType type =
is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- int id = Deoptimizer::GetDeoptimizationId(entry, type);
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
Comment(";;; jump table entry %d.", i);
} else {
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
- Handle<ByteArray> translations = translations_.CreateByteArray();
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
return false;
}
- deoptimizer_data_ = new DeoptimizerData;
+ deoptimizer_data_ = new DeoptimizerData(memory_allocator_);
const bool create_heap_objects = (des == NULL);
if (create_heap_objects && !heap_.CreateHeapObjects()) {
AssertNoAllocation no_allocation;
DependentFunctionFilter filter(function_info);
- Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
+ Deoptimizer::DeoptimizeAllFunctionsWith(function_info->GetIsolate(), &filter);
}
EnumerateOptimizedFunctionsVisitor visitor(sfis,
code_objects,
&compiled_funcs_count);
- Deoptimizer::VisitAllOptimizedFunctions(&visitor);
+ Deoptimizer::VisitAllOptimizedFunctions(heap->isolate(), &visitor);
return compiled_funcs_count;
}
void MarkCompactCollector::StartSweeperThreads() {
sweeping_pending_ = true;
for (int i = 0; i < FLAG_sweeper_threads; i++) {
- heap()->isolate()->sweeper_threads()[i]->StartSweeping();
+ isolate()->sweeper_threads()[i]->StartSweeping();
}
}
void MarkCompactCollector::WaitUntilSweepingCompleted() {
ASSERT(sweeping_pending_ == true);
for (int i = 0; i < FLAG_sweeper_threads; i++) {
- heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
+ isolate()->sweeper_threads()[i]->WaitForSweeperThread();
}
sweeping_pending_ = false;
StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
StealMemoryFromSweeperThreads(PagedSpace* space) {
intptr_t freed_bytes = 0;
for (int i = 0; i < FLAG_sweeper_threads; i++) {
- freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space);
+ freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space);
}
space->AddToAccountingStats(freed_bytes);
space->DecrementUnsweptFreeBytes(freed_bytes);
bool MarkCompactCollector::AreSweeperThreadsActivated() {
- return heap()->isolate()->sweeper_threads() != NULL;
+ return isolate()->sweeper_threads() != NULL;
}
void MarkCompactCollector::MarkInParallel() {
for (int i = 0; i < FLAG_marking_threads; i++) {
- heap()->isolate()->marking_threads()[i]->StartMarking();
+ isolate()->marking_threads()[i]->StartMarking();
}
}
void MarkCompactCollector::WaitUntilMarkingCompleted() {
for (int i = 0; i < FLAG_marking_threads; i++) {
- heap()->isolate()->marking_threads()[i]->WaitForMarkingThread();
+ isolate()->marking_threads()[i]->WaitForMarkingThread();
}
}
// force lazy re-initialization of it. This must be done after the
// GC, because it relies on the new address of certain old space
// objects (empty string, illegal builtin).
- heap()->isolate()->stub_cache()->Clear();
+ isolate()->stub_cache()->Clear();
DeoptimizeMarkedCodeFilter filter;
- Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
+ Deoptimizer::DeoptimizeAllFunctionsWith(isolate(), &filter);
}
void MarkCompactCollector::MarkImplicitRefGroups() {
List<ImplicitRefGroup*>* ref_groups =
- heap()->isolate()->global_handles()->implicit_ref_groups();
+ isolate()->global_handles()->implicit_ref_groups();
int last = 0;
for (int i = 0; i < ref_groups->length(); i++) {
bool work_to_do = true;
ASSERT(marking_deque_.IsEmpty());
while (work_to_do) {
- heap()->isolate()->global_handles()->IterateObjectGroups(
+ isolate()->global_handles()->IterateObjectGroups(
visitor, &IsUnmarkedHeapObjectWithHeap);
MarkImplicitRefGroups();
work_to_do = !marking_deque_.IsEmpty();
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
// with the C stack limit check.
- PostponeInterruptsScope postpone(heap()->isolate());
+ PostponeInterruptsScope postpone(isolate());
bool incremental_marking_overflowed = false;
IncrementalMarking* incremental_marking = heap_->incremental_marking();
}
}
} else if (dest == CODE_SPACE) {
- PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
+ PROFILE(isolate(), CodeMoveEvent(src, dst));
heap()->MoveBlock(dst, src, size);
SlotsBuffer::AddTo(&slots_buffer_allocator_,
&migration_slots_buffer_,
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (heap()->isolate()->debug()->IsLoaded() ||
- heap()->isolate()->debug()->has_break_points()) {
+ if (isolate()->debug()->IsLoaded() ||
+ isolate()->debug()->has_break_points()) {
enable = false;
}
#endif
if (enable) {
if (code_flusher_ != NULL) return;
- code_flusher_ = new CodeFlusher(heap()->isolate());
+ code_flusher_ = new CodeFlusher(isolate());
} else {
if (code_flusher_ == NULL) return;
code_flusher_->EvictAllCandidates();
}
+Isolate* MarkCompactCollector::isolate() const {
+ return heap_->isolate();
+}
+
+
void MarkCompactCollector::Initialize() {
MarkCompactMarkingVisitor::Initialize();
IncrementalMarking::Initialize();
void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
if (is_compacting()) {
- Code* host = heap()->isolate()->inner_pointer_to_code_cache()->
+ Code* host = isolate()->inner_pointer_to_code_cache()->
GcSafeFindCodeForInnerPointer(pc);
MarkBit mark_bit = Marking::MarkBitFrom(host);
if (Marking::IsBlack(mark_bit)) {
static inline bool IsMarked(Object* obj);
inline Heap* heap() const { return heap_; }
+ inline Isolate* isolate() const;
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt;
Deoptimizer::BailoutType type =
is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- int id = Deoptimizer::GetDeoptimizationId(entry, type);
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
Comment(";;; jump table entry %d.", i);
} else {
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
- Handle<ByteArray> translations = translations_.CreateByteArray();
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
void Map::NotifyLeafMapLayoutChange() {
dependent_code()->DeoptimizeDependentCodeGroup(
+ GetIsolate(),
DependentCode::kPrototypeCheckGroup);
}
last_comment = reinterpret_cast<const char*>(info->data());
} else if (last_comment != NULL &&
bailout_id == Deoptimizer::GetDeoptimizationId(
- info->target_address(), Deoptimizer::EAGER)) {
+ GetIsolate(), info->target_address(), Deoptimizer::EAGER)) {
CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
PrintF(" %s\n", last_comment);
return;
}
PrintF("RelocInfo (size = %d)\n", relocation_size());
- for (RelocIterator it(this); !it.done(); it.next()) it.rinfo()->Print(out);
+ for (RelocIterator it(this); !it.done(); it.next()) {
+ it.rinfo()->Print(GetIsolate(), out);
+ }
PrintF(out, "\n");
}
#endif // ENABLE_DISASSEMBLER
void DependentCode::DeoptimizeDependentCodeGroup(
+ Isolate* isolate,
DependentCode::DependencyGroup group) {
AssertNoAllocation no_allocation_scope;
DependentCode::GroupStartIndexes starts(this);
}
set_number_of_entries(group, 0);
DeoptimizeDependentCodeFilter filter;
- Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
+ Deoptimizer::DeoptimizeAllFunctionsWith(isolate, &filter);
}
static Handle<DependentCode> Insert(Handle<DependentCode> entries,
DependencyGroup group,
Handle<Code> value);
- void DeoptimizeDependentCodeGroup(DependentCode::DependencyGroup group);
+ void DeoptimizeDependentCodeGroup(Isolate* isolate,
+ DependentCode::DependencyGroup group);
// The following low-level accessors should only be used by this class
// and the mark compact collector.
bool is_lazy_deopt = jump_table_[i].is_lazy_deopt;
Deoptimizer::BailoutType type =
is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- int id = Deoptimizer::GetDeoptimizationId(entry, type);
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
Comment(";;; jump table entry %d.", i);
} else {
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
- Handle<ByteArray> translations = translations_.CreateByteArray();
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
v8::Handle<v8::Object> exec_state,
v8::Handle<v8::Object> event_data,
v8::Handle<v8::Value> data) {
- Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Isolate* isolate = v8::internal::Isolate::Current();
+ Debug* debug = isolate->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
// Perform a full deoptimization when the specified number of
// breaks have been hit.
if (break_point_hit_count == break_point_hit_count_deoptimize) {
- i::Deoptimizer::DeoptimizeAll();
+ i::Deoptimizer::DeoptimizeAll(isolate);
}
} else if (event == v8::AfterCompile && !compiled_script_data.IsEmpty()) {
const int argc = 1;
v8::Handle<v8::Object> exec_state,
v8::Handle<v8::Object> event_data,
v8::Handle<v8::Value> data) {
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Isolate* isolate = v8::internal::Isolate::Current();
+ v8::internal::Debug* debug = isolate->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
// Perform a full deoptimization when the specified number of
// breaks have been hit.
if (break_point_hit_count == break_point_hit_count_deoptimize) {
- i::Deoptimizer::DeoptimizeAll();
+ i::Deoptimizer::DeoptimizeAll(isolate);
}
}
}
v8::Handle<v8::String> function_name(result->ToString());
function_name->WriteAscii(fn);
if (strcmp(fn, "bar") == 0) {
- i::Deoptimizer::DeoptimizeAll();
+ i::Deoptimizer::DeoptimizeAll(v8::internal::Isolate::Current());
debug_event_break_deoptimize_done = true;
}
}