"Flush code caches in maps during mark compact cycle.")
DEFINE_bool(never_compact, false,
"Never perform compaction on full GC - testing only")
-DEFINE_bool(compact_code_space, false,
+DEFINE_bool(compact_code_space, true,
"Compact code space on full non-incremental collections")
DEFINE_bool(cleanup_code_caches_at_gc, true,
"Flush inline caches prior to mark compact collection and "
      isolate_->heap()->mark_compact_collector()->
          RecordCodeEntrySlot(slot, target);
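+      // Also record the shared function info's code slot, so the compactor
+      // updates it if the code object moves during code space compaction.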
+      RecordSharedFunctionInfoCodeSlot(shared);
+
      candidate = next_candidate;
    }
        candidate->set_code(lazy_compile);
      }
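+      // We are in the middle of a GC cycle, so the write barrier in the code
+      // setter did not record the slot update; record it manually.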
+      RecordSharedFunctionInfoCodeSlot(candidate);
+
      candidate = next_candidate;
    }
    shared_function_info_candidates_head_ = NULL;
  }
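+  // Records the slot holding a shared function info's code pointer so that
+  // the compactor updates the reference when the code object is relocated.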
+  void RecordSharedFunctionInfoCodeSlot(SharedFunctionInfo* shared) {
+    Object** slot = HeapObject::RawField(shared,
+                                         SharedFunctionInfo::kCodeOffset);
+    isolate_->heap()->mark_compact_collector()->
+        RecordSlot(slot, slot, HeapObject::cast(*slot));
+  }
+
  static JSFunction** GetNextCandidateField(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        candidate->address() + JSFunction::kCodeEntryOffset);